PHOENIX-5786 Update phoenix-hive to support current hive versions

            It contains the following patches in addition to the pre-split master branch:
            PHOENIX-5782 remove slf4j-log4j12 from phoenix-hive jar
            PHOENIX-5619 CREATE TABLE AS SELECT for Phoenix table doesn't work correctly in Hive
            PHOENIX-5552 Hive against Phoenix gets "Expecting "RPAREN", got "L" in Tez mode"
            PHOENIX-5309 Skip adding log4j and slf4j to phoenix-hive jar to avoid logging in hive-server2.err
            PHOENIX-3662 PhoenixStorageHandler throws ClassCastException
            PHOENIX-4794 Support Hive-3.1

        Before the phoenix-connectors repo was created, the master branch had the following
        additional patches compared to 4.x. (These are also included.)

            PHOENIX-4880 Shade the rest of Phoenix dependencies in phoenix-server.jar
            PHOENIX-4756 Integration tests for PhoenixStorageHandler doesn't work on 5.x branch
            PHOENIX-4739 Update phoenix 5.0 with hive new API getBufferedRowCount
            PHOENIX-4423 Hive 2.3.0 support
            PHOENIX-4570 Explicitly add test dependencies to phoenix-pherf
            PHOENIX-4403 Workaround Tephra issues and fix all left over compilation issues in phoenix-core(addendum)
            PHOENIX-4321 Replace deprecated HBaseAdmin with Admin
            PHOENIX-4303 Replace HTableInterface,HConnection with Table,Connection interfaces respectively(Rajeshbabu)
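
        For context, the following is a minimal sketch of how the connector is typically
        exercised from Hive 3 once the phoenix-hive jar is on the Hive classpath. It is not
        part of this patch; the HiveServer2 URL and table layout are illustrative
        assumptions, while the storage handler class and table properties follow the
        Phoenix Hive documentation.

            import java.sql.Connection;
            import java.sql.DriverManager;
            import java.sql.Statement;

            /** Hypothetical example: create a Hive 3 external table backed by Phoenix. */
            public class PhoenixHive3Example {
                public static void main(String[] args) throws Exception {
                    // Assumes a reachable HiveServer2 and hive-jdbc on the classpath.
                    try (Connection conn = DriverManager
                            .getConnection("jdbc:hive2://localhost:10000/default");
                         Statement stmt = conn.createStatement()) {
                        // The table reads and writes through Phoenix/HBase rather than HDFS files.
                        stmt.execute("CREATE EXTERNAL TABLE phoenix_table (id INT, name STRING) "
                            + "STORED BY 'org.apache.phoenix.hive.PhoenixStorageHandler' "
                            + "TBLPROPERTIES ("
                            + "'phoenix.table.name' = 'PHOENIX_TABLE', "
                            + "'phoenix.zookeeper.quorum' = 'localhost', "
                            + "'phoenix.zookeeper.client.port' = '2181', "
                            + "'phoenix.rowkeys' = 'id')");
                    }
                }
            }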

        Co-authored-by: Istvan Toth <stoty@apache.org>
        Co-authored-by: Toshihiro Suzuki <brfrn169@gmail.com>
        Co-authored-by: Rajeshbabu Chintaguntla <rajeshbabu@apache.org>
        Co-authored-by: Jeongdae Kim <kjd9306@gmail.com>
        Co-authored-by: Jesus Camacho Rodriguez <jcamacho@apache.org>
        Co-authored-by: Josh Elser <elserj@apache.org>
        Co-authored-by: Sergey Soldatov <ssa@apache.org>
        Co-authored-by: Ankit Singhal <ankitsinghal59@gmail.com>

Closes #21
diff --git a/phoenix-hive3/pom.xml b/phoenix-hive3/pom.xml
new file mode 100644
index 0000000..444f807
--- /dev/null
+++ b/phoenix-hive3/pom.xml
@@ -0,0 +1,329 @@
+<?xml version='1.0'?>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.apache.phoenix</groupId>
+        <artifactId>phoenix-connectors</artifactId>
+        <version>6.0.0-SNAPSHOT</version>
+    </parent>
+    <artifactId>phoenix-hive3</artifactId>
+    <name>Phoenix - Hive3</name>
+    <properties>
+        <test.tmp.dir>${project.build.directory}/tmp</test.tmp.dir>
+        <netty.version>4.1.47.Final</netty.version>
+        <phoenix.version>5.1.0-SNAPSHOT</phoenix.version>
+        <hbase.version>2.2.4</hbase.version>
+        <hadoop.version>3.0.3</hadoop.version>
+        <avatica.version>1.12.0</avatica.version>
+        <hive3.version>3.1.2</hive3.version>
+        <curator.version>4.0.0</curator.version>
+        <tez.version>0.9.1</tez.version>
+        <jetty.version>9.3.8.v20160314</jetty.version>
+        <jdk.version>1.8</jdk.version>
+    </properties>
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.phoenix</groupId>
+            <artifactId>phoenix-core</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>com.google.guava</groupId>
+                    <artifactId>guava</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hive</groupId>
+            <artifactId>hive-cli</artifactId>
+            <version>${hive3.version}</version>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hive</groupId>
+            <artifactId>hive-exec</artifactId>
+            <version>${hive3.version}</version>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hive</groupId>
+            <artifactId>hive-metastore</artifactId>
+            <version>${hive3.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hive</groupId>
+            <artifactId>hive-standalone-metastore</artifactId>
+            <type>test-jar</type>
+            <version>${hive3.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>${slf4j.version}</version>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+            <version>${slf4j.version}</version>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+            <version>${log4j.version}</version>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-mapreduce-client-core</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>io.netty</groupId>
+                    <artifactId>netty</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-all</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+
+        <!-- Test dependencies -->
+        <dependency>
+            <groupId>org.apache.phoenix</groupId>
+            <artifactId>phoenix-core</artifactId>
+            <classifier>tests</classifier>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hbase</groupId>
+            <artifactId>hbase-testing-util</artifactId>
+            <scope>test</scope>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hbase</groupId>
+            <artifactId>hbase-it</artifactId>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-hdfs</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-hdfs</artifactId>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-auth</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-mapreduce-client-common</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-minicluster</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.tez</groupId>
+            <artifactId>tez-tests</artifactId>
+            <scope>test</scope>
+            <version>${tez.version}</version>
+            <type>test-jar</type>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.apache.hadoop</groupId>
+                    <artifactId>hadoop-yarn-api</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.tez</groupId>
+            <artifactId>tez-dag</artifactId>
+            <scope>test</scope>
+            <version>${tez.version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.apache.hadoop</groupId>
+                    <artifactId>hadoop-yarn-api</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.jetty</groupId>
+            <artifactId>jetty-util</artifactId>
+            <scope>test</scope>
+            <version>${jetty.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.jetty</groupId>
+            <artifactId>jetty-http</artifactId>
+            <scope>test</scope>
+            <version>${jetty.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.jetty</groupId>
+            <artifactId>jetty-server</artifactId>
+            <scope>test</scope>
+            <version>${jetty.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-all</artifactId>
+            <version>${mockito-all.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>19.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.calcite.avatica</groupId>
+            <artifactId>avatica</artifactId>
+            <!-- Overriding the version of Avatica that PQS uses so that Hive will work -->
+            <version>${avatica.version}</version>
+            <scope>test</scope>
+            <!-- And removing a bunch of dependencies that haven't been shaded in this older
+                 Avatica version which conflict with HDFS -->
+            <exclusions>
+                <exclusion>
+                    <groupId>org.hsqldb</groupId>
+                    <artifactId>hsqldb</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>com.fasterxml.jackson.core</groupId>
+                    <artifactId>jackson-databind</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>com.fasterxml.jackson.core</groupId>
+                    <artifactId>jackson-annotations</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>com.fasterxml.jackson.core</groupId>
+                    <artifactId>jackson-core</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>build-helper-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-failsafe-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>HBaseManagedTimeTests</id>
+                        <configuration>
+                            <encoding>UTF-8</encoding>
+                            <forkCount>1</forkCount>
+                            <runOrder>alphabetical</runOrder>
+                            <reuseForks>false</reuseForks>
+                            <argLine>-enableassertions -Xmx2000m -XX:MaxPermSize=256m
+                                -Djava.security.egd=file:/dev/./urandom
+                                "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"
+                                -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/
+                                -Dorg.apache.hadoop.hbase.shaded.io.netty.packagePrefix=org.apache.hadoop.hbase.shaded.
+                            </argLine>
+                            <redirectTestOutputToFile>${test.output.tofile}
+                            </redirectTestOutputToFile>
+                            <testSourceDirectory>${basedir}/src/it/java</testSourceDirectory>
+                            <groups>org.apache.phoenix.end2end.HBaseManagedTimeTest</groups>
+                            <shutdown>kill</shutdown>
+                            <useSystemClassLoader>false</useSystemClassLoader>
+                        </configuration>
+                        <goals>
+                            <goal>integration-test</goal>
+                            <goal>verify</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <version>${maven-dependency-plugin.version}</version>
+                <executions>
+                    <execution>
+                        <id>copy-dependencies</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>copy-dependencies</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-assembly-plugin</artifactId>
+                <configuration>
+                    <descriptorRefs>
+                        <descriptorRef>jar-with-dependencies</descriptorRef>
+                    </descriptorRefs>
+                </configuration>
+                <executions>
+                    <execution>
+                        <id>make-jar-with-dependencies</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>single</goal>
+                        </goals>
+                        <configuration>
+                            <appendAssemblyId>false</appendAssemblyId>
+                            <finalName>phoenix-${project.version}-hive</finalName>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/phoenix-hive3/src/it/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java b/phoenix-hive3/src/it/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java
new file mode 100644
index 0000000..f9f7057
--- /dev/null
+++ b/phoenix-hive3/src/it/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql;
+
+/**
+ * Standard output and return code of a process executed during the qtests.
+ */
+public class QTestProcessExecResult {
+
+  private static final String TRUNCATED_OUTPUT = "Output was too long and had to be truncated...";
+  private static final short MAX_OUTPUT_CHAR_LENGTH = 2000;
+
+  private final int returnCode;
+  private final String standardOut;
+
+  QTestProcessExecResult(int code, String output) {
+    this.returnCode = code;
+    this.standardOut = truncatefNeeded(output);
+  }
+
+  /**
+   * @return executed process return code
+   */
+  public int getReturnCode() {
+    return this.returnCode;
+  }
+
+  /**
+   * @return output captured from stdout while process was executing
+   */
+  public String getCapturedOutput() {
+    return this.standardOut;
+  }
+
+  public static QTestProcessExecResult create(int code, String output) {
+    return new QTestProcessExecResult(code, output);
+  }
+
+  public static  QTestProcessExecResult createWithoutOutput(int code) {
+    return new QTestProcessExecResult(code, "");
+  }
+
+  private String truncatefNeeded(String orig) {
+    if (orig.length() > MAX_OUTPUT_CHAR_LENGTH) {
+      return orig.substring(0, MAX_OUTPUT_CHAR_LENGTH) + "\r\n" + TRUNCATED_OUTPUT;
+    } else {
+      return orig;
+    }
+  }
+}
diff --git a/phoenix-hive3/src/it/java/org/apache/hadoop/hive/ql/QTestUtil.java b/phoenix-hive3/src/it/java/org/apache/hadoop/hive/ql/QTestUtil.java
new file mode 100644
index 0000000..4450047
--- /dev/null
+++ b/phoenix-hive3/src/it/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -0,0 +1,2490 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql;
+
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.PrintStream;
+import java.io.Serializable;
+import java.io.StringWriter;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
+import java.nio.file.StandardOpenOption;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Deque;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Stream;
+
+import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.output.ByteArrayOutputStream;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.apache.hadoop.hive.cli.CliDriver;
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.common.io.CachingPrintStream;
+import org.apache.hadoop.hive.common.io.DigestPrintStream;
+import org.apache.hadoop.hive.common.io.SortAndDigestPrintStream;
+import org.apache.hadoop.hive.common.io.SortPrintStream;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.llap.io.api.LlapProxy;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.spark.session.SparkSession;
+import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl;
+import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
+import org.apache.hadoop.hive.ql.lockmgr.zookeeper.CuratorFrameworkSingleton;
+import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.ParseDriver;
+import org.apache.hadoop.hive.ql.parse.ParseException;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.processors.CommandProcessor;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.processors.HiveCommand;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hive.common.util.StreamPrinter;
+import org.apache.logging.log4j.util.Strings;
+import org.apache.tools.ant.BuildException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooKeeper;
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+
+import junit.framework.TestSuite;
+
+/**
+ * QTestUtil. Cloned from Hive 3.0.0 as hive doesn't release hive-it-util artifact
+ *
+ */
+public class QTestUtil {
+  public static final String UTF_8 = "UTF-8";
+  public static final String HIVE_ROOT = getHiveRoot();
+  // security property names
+  private static final String SECURITY_KEY_PROVIDER_URI_NAME = "dfs.encryption.key.provider.uri";
+  private static final String CRLF = System.getProperty("line.separator");
+
+  public static final String QTEST_LEAVE_FILES = "QTEST_LEAVE_FILES";
+  private static final Logger LOG = LoggerFactory.getLogger("QTestUtil");
+  private final static String defaultInitScript = "q_test_init.sql";
+  private final static String defaultCleanupScript = "q_test_cleanup.sql";
+  private final String[] testOnlyCommands = new String[]{"crypto"};
+
+  private static final String TEST_TMP_DIR_PROPERTY = "test.tmp.dir"; // typically target/tmp
+  private static final String BUILD_DIR_PROPERTY = "build.dir"; // typically target
+
+  public static final String PATH_HDFS_REGEX = "(hdfs://)([a-zA-Z0-9:/_\\-\\.=])+";
+  public static final String PATH_HDFS_WITH_DATE_USER_GROUP_REGEX = "([a-z]+) ([a-z]+)([ ]+)([0-9]+) ([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}) " + PATH_HDFS_REGEX;
+
+  private String testWarehouse;
+  private final String testFiles;
+  protected final String outDir;
+  protected String overrideResultsDir;
+  protected final String logDir;
+  private final TreeMap<String, String> qMap;
+  private final Set<String> qSkipSet;
+  private final Set<String> qSortSet;
+  private final Set<String> qSortQuerySet;
+  private final Set<String> qHashQuerySet;
+  private final Set<String> qSortNHashQuerySet;
+  private final Set<String> qNoSessionReuseQuerySet;
+  private final Set<String> qJavaVersionSpecificOutput;
+  private static final String SORT_SUFFIX = ".sorted";
+  private final Set<String> srcTables;
+  private final Set<String> srcUDFs;
+  private final MiniClusterType clusterType;
+  private final FsType fsType;
+  private ParseDriver pd;
+  protected Hive db;
+  protected QueryState queryState;
+  protected HiveConf conf;
+  private IDriver drv;
+  private BaseSemanticAnalyzer sem;
+  protected final boolean overWrite;
+  private CliDriver cliDriver;
+  private HadoopShims.MiniMrShim mr = null;
+  private HadoopShims.MiniDFSShim dfs = null;
+  private FileSystem fs;
+  private HadoopShims.HdfsEncryptionShim hes = null;
+  private String hadoopVer = null;
+  private QTestSetup setup = null;
+  private SparkSession sparkSession = null;
+  private boolean isSessionStateStarted = false;
+  private static final String javaVersion = getJavaVersion();
+
+  private final String initScript;
+  private final String cleanupScript;
+
+
+  public interface SuiteAddTestFunctor {
+    public void addTestToSuite(TestSuite suite, Object setup, String tName);
+  }
+
+  public static Set<String> getSrcTables() {
+    HashSet<String> srcTables = new HashSet<String>();
+    // FIXME: moved default value to here...for now
+    // I think this feature is never really used from the command line
+    String defaultTestSrcTables = "src,src1,srcbucket,srcbucket2,src_json,src_thrift," +
+        "src_sequencefile,srcpart,alltypesorc,src_hbase,cbo_t1,cbo_t2,cbo_t3,src_cbo,part," +
+        "lineitem,alltypesparquet";
+    for (String srcTable : System.getProperty("test.src.tables", defaultTestSrcTables).trim().split(",")) {
+      srcTable = srcTable.trim();
+      if (!srcTable.isEmpty()) {
+        srcTables.add(srcTable);
+      }
+    }
+    if (srcTables.isEmpty()) {
+      throw new RuntimeException("Source tables cannot be empty");
+    }
+    return srcTables;
+  }
+
+  /**
+   * Returns the default UDF names which should not be removed when resetting the test database
+   * @return The list of the UDF names not to remove
+   */
+  private Set<String> getSrcUDFs() {
+    HashSet<String> srcUDFs = new HashSet<String>();
+    // FIXME: moved default value to here...for now
+    // I think this feature is never really used from the command line
+    String defaultTestSrcUDFs = "qtest_get_java_boolean";
+    for (String srcUDF : System.getProperty("test.src.udfs", defaultTestSrcUDFs).trim().split(","))
+    {
+      srcUDF = srcUDF.trim();
+      if (!srcUDF.isEmpty()) {
+        srcUDFs.add(srcUDF);
+      }
+    }
+    if (srcUDFs.isEmpty()) {
+      throw new RuntimeException("Source UDFs cannot be empty");
+    }
+    return srcUDFs;
+  }
+
+
+
+  public HiveConf getConf() {
+    return conf;
+  }
+
+  public boolean deleteDirectory(File path) {
+    if (path.exists()) {
+      File[] files = path.listFiles();
+      for (File file : files) {
+        if (file.isDirectory()) {
+          deleteDirectory(file);
+        } else {
+          file.delete();
+        }
+      }
+    }
+    return (path.delete());
+  }
+
+  public void copyDirectoryToLocal(Path src, Path dest) throws Exception {
+
+    FileSystem srcFs = src.getFileSystem(conf);
+    FileSystem destFs = dest.getFileSystem(conf);
+    if (srcFs.exists(src)) {
+      FileStatus[] files = srcFs.listStatus(src);
+      for (FileStatus file : files) {
+        String name = file.getPath().getName();
+        Path dfs_path = file.getPath();
+        Path local_path = new Path(dest, name);
+
+        // If this is a source table we do not copy it out
+        if (srcTables.contains(name)) {
+          continue;
+        }
+
+        if (file.isDirectory()) {
+          if (!destFs.exists(local_path)) {
+            destFs.mkdirs(local_path);
+          }
+          copyDirectoryToLocal(dfs_path, local_path);
+        } else {
+          srcFs.copyToLocalFile(dfs_path, local_path);
+        }
+      }
+    }
+  }
+
+  static Pattern mapTok = Pattern.compile("(\\.?)(.*)_map_(.*)");
+  static Pattern reduceTok = Pattern.compile("(.*)(reduce_[^\\.]*)((\\..*)?)");
+
+  public void normalizeNames(File path) throws Exception {
+    if (path.isDirectory()) {
+      File[] files = path.listFiles();
+      for (File file : files) {
+        normalizeNames(file);
+      }
+    } else {
+      Matcher m = reduceTok.matcher(path.getName());
+      if (m.matches()) {
+        String name = m.group(1) + "reduce" + m.group(3);
+        path.renameTo(new File(path.getParent(), name));
+      } else {
+        m = mapTok.matcher(path.getName());
+        if (m.matches()) {
+          String name = m.group(1) + "map_" + m.group(3);
+          path.renameTo(new File(path.getParent(), name));
+        }
+      }
+    }
+  }
+
+  public String getOutputDirectory() {
+    return outDir;
+  }
+
+  public String getLogDirectory() {
+    return logDir;
+  }
+
+  private String getHadoopMainVersion(String input) {
+    if (input == null) {
+      return null;
+    }
+    Pattern p = Pattern.compile("^(\\d+\\.\\d+).*");
+    Matcher m = p.matcher(input);
+    if (m.matches()) {
+      return m.group(1);
+    }
+    return null;
+  }
+
+  public void initConf() throws Exception {
+
+    String vectorizationEnabled = System.getProperty("test.vectorization.enabled");
+    if(vectorizationEnabled != null && vectorizationEnabled.equalsIgnoreCase("true")) {
+      conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, true);
+    }
+
+    // Plug verifying metastore in for testing DirectSQL.
+    conf.setVar(ConfVars.METASTORE_RAW_STORE_IMPL,
+        "org.apache.hadoop.hive.metastore.VerifyingObjectStore");
+
+    if (mr != null) {
+      mr.setupConfiguration(conf);
+
+      // TODO Ideally this should be done independent of whether mr is setup or not.
+      setFsRelatedProperties(conf, fs.getScheme().equals("file"),fs);
+    }
+    conf.set(ConfVars.HIVE_EXECUTION_ENGINE.varname, clusterType.name());
+  }
+
+  private void setFsRelatedProperties(HiveConf conf, boolean isLocalFs, FileSystem fs) {
+    String fsUriString = fs.getUri().toString();
+
+    // Different paths if running locally vs a remote fileSystem. Ideally this difference should not exist.
+    Path warehousePath;
+    Path jarPath;
+    Path userInstallPath;
+    if (isLocalFs) {
+      String buildDir = System.getProperty(BUILD_DIR_PROPERTY);
+      Preconditions.checkState(Strings.isNotBlank(buildDir));
+      Path path = new Path(fsUriString, buildDir);
+
+      // Create a fake fs root for local fs
+      Path localFsRoot  = new Path(path, "localfs");
+      warehousePath = new Path(localFsRoot, "warehouse");
+      jarPath = new Path(localFsRoot, "jar");
+      userInstallPath = new Path(localFsRoot, "user_install");
+    } else {
+      // TODO Why is this changed from the default in hive-conf?
+      warehousePath = new Path(fsUriString, "/build/ql/test/data/warehouse/");
+      jarPath = new Path(new Path(fsUriString, "/user"), "hive");
+      userInstallPath = new Path(fsUriString, "/user");
+    }
+
+    warehousePath = fs.makeQualified(warehousePath);
+    jarPath = fs.makeQualified(jarPath);
+    userInstallPath = fs.makeQualified(userInstallPath);
+
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsUriString);
+
+    // Remote dirs
+    conf.setVar(ConfVars.METASTOREWAREHOUSE, warehousePath.toString());
+    conf.setVar(ConfVars.HIVE_JAR_DIRECTORY, jarPath.toString());
+    conf.setVar(ConfVars.HIVE_USER_INSTALL_DIR, userInstallPath.toString());
+    // ConfVars.SCRATCHDIR - {test.tmp.dir}/scratchdir
+
+    // Local dirs
+    // ConfVars.LOCALSCRATCHDIR - {test.tmp.dir}/localscratchdir
+
+    // TODO Make sure to cleanup created dirs.
+  }
+
+  private void createRemoteDirs() {
+    assert fs != null;
+    Path warehousePath = fs.makeQualified(new Path(conf.getVar(ConfVars.METASTOREWAREHOUSE)));
+    assert warehousePath != null;
+    Path hiveJarPath = fs.makeQualified(new Path(conf.getVar(ConfVars.HIVE_JAR_DIRECTORY)));
+    assert hiveJarPath != null;
+    Path userInstallPath = fs.makeQualified(new Path(conf.getVar(ConfVars.HIVE_USER_INSTALL_DIR)));
+    assert userInstallPath != null;
+    try {
+      fs.mkdirs(warehousePath);
+    } catch (IOException e) {
+      LOG.error("Failed to create path={}. Continuing. Exception message={}", warehousePath,
+          e.getMessage());
+    }
+    try {
+      fs.mkdirs(hiveJarPath);
+    } catch (IOException e) {
+      LOG.error("Failed to create path={}. Continuing. Exception message={}", warehousePath,
+          e.getMessage());
+    }
+    try {
+      fs.mkdirs(userInstallPath);
+    } catch (IOException e) {
+      LOG.error("Failed to create path={}. Continuing. Exception message={}", warehousePath,
+          e.getMessage());
+    }
+  }
+
+  private enum CoreClusterType {
+    MR,
+    TEZ,
+    SPARK,
+    DRUID
+  }
+
+  public enum FsType {
+    local,
+    hdfs,
+    encrypted_hdfs,
+  }
+
+  public enum MiniClusterType {
+
+    mr(CoreClusterType.MR, FsType.hdfs),
+    tez(CoreClusterType.TEZ, FsType.hdfs),
+    tez_local(CoreClusterType.TEZ, FsType.local),
+    spark(CoreClusterType.SPARK, FsType.local),
+    miniSparkOnYarn(CoreClusterType.SPARK, FsType.hdfs),
+    llap(CoreClusterType.TEZ, FsType.hdfs),
+    llap_local(CoreClusterType.TEZ, FsType.local),
+    none(CoreClusterType.MR, FsType.local),
+    druid(CoreClusterType.DRUID, FsType.hdfs);
+
+
+    private final CoreClusterType coreClusterType;
+    private final FsType defaultFsType;
+
+    MiniClusterType(CoreClusterType coreClusterType, FsType defaultFsType) {
+      this.coreClusterType = coreClusterType;
+      this.defaultFsType = defaultFsType;
+    }
+
+    public CoreClusterType getCoreClusterType() {
+      return coreClusterType;
+    }
+
+    public FsType getDefaultFsType() {
+      return defaultFsType;
+    }
+
+    public static MiniClusterType valueForString(String type) {
+      // Replace this with valueOf.
+      if (type.equals("miniMR")) {
+        return mr;
+      } else if (type.equals("tez")) {
+        return tez;
+      } else if (type.equals("tez_local")) {
+        return tez_local;
+      } else if (type.equals("spark")) {
+        return spark;
+      } else if (type.equals("miniSparkOnYarn")) {
+        return miniSparkOnYarn;
+      } else if (type.equals("llap")) {
+        return llap;
+      } else if (type.equals("llap_local")) {
+        return llap_local;
+      } else if (type.equals("druid")) {
+        return druid;
+      } else {
+        return none;
+      }
+    }
+  }
+
+
+  private String getKeyProviderURI() {
+    // Use the target directory if it is not specified
+    String keyDir = HIVE_ROOT + "ql/target/";
+
+    // put the jks file in the current test path only for test purpose
+    return "jceks://file" + new Path(keyDir, "test.jks").toUri();
+  }
+
+  public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
+                   String confDir, String hadoopVer, String initScript, String cleanupScript,
+                   boolean withLlapIo) throws Exception {
+    this(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript,
+        withLlapIo, null);
+  }
+
+  public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
+                   String confDir, String hadoopVer, String initScript, String cleanupScript,
+                   boolean withLlapIo, FsType fsType)
+    throws Exception {
+    LOG.info("Setting up QTestUtil with outDir="+outDir+", logDir="+logDir+", clusterType="+clusterType+", confDir="+confDir+"," +
+        " hadoopVer="+hadoopVer+", initScript="+initScript+", cleanupScript="+cleanupScript+", withLlapIo="+withLlapIo+"," +
+            " fsType="+fsType+"");
+    Preconditions.checkNotNull(clusterType, "ClusterType cannot be null");
+    if (fsType != null) {
+      this.fsType = fsType;
+    } else {
+      this.fsType = clusterType.getDefaultFsType();
+    }
+    this.outDir = outDir;
+    this.logDir = logDir;
+    this.srcTables=getSrcTables();
+    this.srcUDFs = getSrcUDFs();
+
+    // HIVE-14443 move this fall-back logic to CliConfigs
+    if (confDir != null && !confDir.isEmpty()) {
+      HiveConf.setHiveSiteLocation(new URL("file://"+ new File(confDir).toURI().getPath() + "/hive-site.xml"));
+      MetastoreConf.setHiveSiteLocation(HiveConf.getHiveSiteLocation());
+      System.out.println("Setting hive-site: "+ HiveConf.getHiveSiteLocation());
+    }
+
+    queryState = new QueryState.Builder().withHiveConf(new HiveConf(IDriver.class)).build();
+    conf = queryState.getConf();
+    this.hadoopVer = getHadoopMainVersion(hadoopVer);
+    qMap = new TreeMap<String, String>();
+    qSkipSet = new HashSet<String>();
+    qSortSet = new HashSet<String>();
+    qSortQuerySet = new HashSet<String>();
+    qHashQuerySet = new HashSet<String>();
+    qSortNHashQuerySet = new HashSet<String>();
+    qNoSessionReuseQuerySet = new HashSet<String>();
+    qJavaVersionSpecificOutput = new HashSet<String>();
+    this.clusterType = clusterType;
+
+    HadoopShims shims = ShimLoader.getHadoopShims();
+
+    setupFileSystem(shims);
+
+    setup = new QTestSetup();
+    setup.preTest(conf);
+
+    setupMiniCluster(shims, confDir);
+
+    initConf();
+
+    if (withLlapIo && (clusterType == MiniClusterType.none)) {
+      LOG.info("initializing llap IO");
+      LlapProxy.initializeLlapIo(conf);
+    }
+
+
+    // Use the current directory if it is not specified
+    String dataDir = conf.get("test.data.files");
+    if (dataDir == null) {
+      dataDir = new File(".").getAbsolutePath() + "/data/files";
+    }
+    testFiles = dataDir;
+
+    // Use the current directory if it is not specified
+    String scriptsDir = conf.get("test.data.scripts");
+    if (scriptsDir == null) {
+      scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
+    }
+
+    this.initScript = scriptsDir + File.separator + initScript;
+    this.cleanupScript = scriptsDir + File.separator + cleanupScript;
+
+    overWrite = "true".equalsIgnoreCase(System.getProperty("test.output.overwrite"));
+
+    init();
+  }
+
+  private void setupFileSystem(HadoopShims shims) throws IOException {
+
+    if (fsType == FsType.local) {
+      fs = FileSystem.getLocal(conf);
+    } else if (fsType == FsType.hdfs || fsType == FsType.encrypted_hdfs) {
+      int numDataNodes = 4;
+
+      if (fsType == FsType.encrypted_hdfs) {
+        // Set the security key provider so that the MiniDFS cluster is initialized
+        // with encryption
+        conf.set(SECURITY_KEY_PROVIDER_URI_NAME, getKeyProviderURI());
+        conf.setInt("fs.trash.interval", 50);
+
+        dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
+        fs = dfs.getFileSystem();
+
+        // set up the java key provider for encrypted hdfs cluster
+        hes = shims.createHdfsEncryptionShim(fs, conf);
+
+        LOG.info("key provider is initialized");
+      } else {
+        dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
+        fs = dfs.getFileSystem();
+      }
+    } else {
+      throw new IllegalArgumentException("Unknown or unhandled fsType [" + fsType + "]");
+    }
+  }
+
+  private void setupMiniCluster(HadoopShims shims, String confDir) throws
+          IOException {
+
+    String uriString = fs.getUri().toString();
+
+    if (clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
+        if (confDir != null && !confDir.isEmpty()) {
+          conf.addResource(new URL("file://" + new File(confDir).toURI().getPath()
+              + "/tez-site.xml"));
+        }
+        int numTrackers = 2;
+        if (EnumSet.of(MiniClusterType.llap_local, MiniClusterType.tez_local).contains(clusterType)) {
+          mr = shims.getLocalMiniTezCluster(conf, clusterType == MiniClusterType.llap_local);
+        } else {
+          mr = shims.getMiniTezCluster(conf, numTrackers, uriString,
+              EnumSet.of(MiniClusterType.llap, MiniClusterType.llap_local).contains(clusterType));
+        }
+      } else if (clusterType == MiniClusterType.miniSparkOnYarn) {
+        mr = shims.getMiniSparkCluster(conf, 2, uriString, 1);
+      } else if (clusterType == MiniClusterType.mr) {
+        mr = shims.getMiniMrCluster(conf, 2, uriString, 1);
+      }
+  }
+
+
+  public void shutdown() throws Exception {
+    if (System.getenv(QTEST_LEAVE_FILES) == null) {
+      cleanUp();
+    }
+
+    if (clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
+      SessionState.get().getTezSession().destroy();
+    }
+    
+    setup.tearDown();
+    if (sparkSession != null) {
+      try {
+        SparkSessionManagerImpl.getInstance().closeSession(sparkSession);
+      } catch (Exception ex) {
+        LOG.error("Error closing spark session.", ex);
+      } finally {
+        sparkSession = null;
+      }
+    }
+    if (mr != null) {
+      mr.shutdown();
+      mr = null;
+    }
+    FileSystem.closeAll();
+    if (dfs != null) {
+      dfs.shutdown();
+      dfs = null;
+    }
+    Hive.closeCurrent();
+  }
+
+  public String readEntireFileIntoString(File queryFile) throws IOException {
+    InputStreamReader isr = new InputStreamReader(
+        new BufferedInputStream(new FileInputStream(queryFile)), QTestUtil.UTF_8);
+    StringWriter sw = new StringWriter();
+    try {
+      IOUtils.copy(isr, sw);
+    } finally {
+      if (isr != null) {
+        isr.close();
+      }
+    }
+    return sw.toString();
+  }
+
+  public void addFile(String queryFile) throws IOException {
+    addFile(queryFile, false);
+  }
+
+  public void addFile(String queryFile, boolean partial) throws IOException {
+    addFile(new File(queryFile));
+  }
+
+  public void addFile(File qf) throws IOException {
+    addFile(qf, false);
+  }
+
+  public void addFile(File qf, boolean partial) throws IOException {
+    String query = readEntireFileIntoString(qf);
+    qMap.put(qf.getName(), query);
+    if (partial) {
+      return;
+    }
+
+    if(checkHadoopVersionExclude(qf.getName(), query)) {
+      qSkipSet.add(qf.getName());
+    }
+
+    if (checkNeedJavaSpecificOutput(qf.getName(), query)) {
+      qJavaVersionSpecificOutput.add(qf.getName());
+    }
+
+    if (matches(SORT_BEFORE_DIFF, query)) {
+      qSortSet.add(qf.getName());
+    } else if (matches(SORT_QUERY_RESULTS, query)) {
+      qSortQuerySet.add(qf.getName());
+    } else if (matches(HASH_QUERY_RESULTS, query)) {
+      qHashQuerySet.add(qf.getName());
+    } else if (matches(SORT_AND_HASH_QUERY_RESULTS, query)) {
+      qSortNHashQuerySet.add(qf.getName());
+    }
+    if (matches(NO_SESSION_REUSE, query)) {
+      qNoSessionReuseQuerySet.add(qf.getName());
+    }
+  }
+
+  private static final Pattern SORT_BEFORE_DIFF = Pattern.compile("-- SORT_BEFORE_DIFF");
+  private static final Pattern SORT_QUERY_RESULTS = Pattern.compile("-- SORT_QUERY_RESULTS");
+  private static final Pattern HASH_QUERY_RESULTS = Pattern.compile("-- HASH_QUERY_RESULTS");
+  private static final Pattern SORT_AND_HASH_QUERY_RESULTS = Pattern.compile("-- SORT_AND_HASH_QUERY_RESULTS");
+  private static final Pattern NO_SESSION_REUSE = Pattern.compile("-- NO_SESSION_REUSE");
+
+  private boolean matches(Pattern pattern, String query) {
+    Matcher matcher = pattern.matcher(query);
+    if (matcher.find()) {
+      return true;
+    }
+    return false;
+  }
+
+  private boolean checkHadoopVersionExclude(String fileName, String query){
+
+    // Look for a hint to not run a test on some Hadoop versions
+    Pattern pattern = Pattern.compile("-- (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS\\((.*)\\)");
+
+    boolean excludeQuery = false;
+    boolean includeQuery = false;
+    Set<String> versionSet = new HashSet<String>();
+    String hadoopVer = ShimLoader.getMajorVersion();
+
+    Matcher matcher = pattern.matcher(query);
+
+    // Each qfile may include at most one INCLUDE or EXCLUDE directive.
+    //
+    // If a qfile contains an INCLUDE directive, and hadoopVer does
+    // not appear in the list of versions to include, then the qfile
+    // is skipped.
+    //
+    // If a qfile contains an EXCLUDE directive, and hadoopVer is
+    // listed in the list of versions to EXCLUDE, then the qfile is
+    // skipped.
+    //
+    // Otherwise, the qfile is included.
+
+    if (matcher.find()) {
+
+      String prefix = matcher.group(1);
+      if ("EX".equals(prefix)) {
+        excludeQuery = true;
+      } else {
+        includeQuery = true;
+      }
+
+      String versions = matcher.group(2);
+      for (String s : versions.split("\\,")) {
+        s = s.trim();
+        versionSet.add(s);
+      }
+    }
+
+    if (matcher.find()) {
+      //2nd match is not supposed to be there
+      String message = "QTestUtil: qfile " + fileName
+        + " contains more than one reference to (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS";
+      throw new UnsupportedOperationException(message);
+    }
+
+    if (excludeQuery && versionSet.contains(hadoopVer)) {
+      System.out.println("QTestUtil: " + fileName
+        + " EXCLUDE list contains Hadoop Version " + hadoopVer + ". Skipping...");
+      return true;
+    } else if (includeQuery && !versionSet.contains(hadoopVer)) {
+      System.out.println("QTestUtil: " + fileName
+        + " INCLUDE list does not contain Hadoop Version " + hadoopVer + ". Skipping...");
+      return true;
+    }
+    return false;
+  }
+
+  private boolean checkNeedJavaSpecificOutput(String fileName, String query) {
+    Pattern pattern = Pattern.compile("-- JAVA_VERSION_SPECIFIC_OUTPUT");
+    Matcher matcher = pattern.matcher(query);
+    if (matcher.find()) {
+      System.out.println("Test is flagged to generate Java version specific " +
+          "output. Since we are using Java version " + javaVersion +
+          ", we will generated Java " + javaVersion + " specific " +
+          "output file for query file " + fileName);
+      return true;
+    }
+
+    return false;
+  }
+
+  /**
+   * Get formatted Java version to include minor version, but
+   * exclude patch level.
+   *
+   * @return Java version formatted as major_version.minor_version
+   */
+  private static String getJavaVersion() {
+    String version = System.getProperty("java.version");
+    if (version == null) {
+      throw new NullPointerException("No java version could be determined " +
+          "from system properties");
+    }
+
+    // "java version" system property is formatted
+    // major_version.minor_version.patch_level.
+    // Find second dot, instead of last dot, to be safe
+    int pos = version.indexOf('.');
+    pos = version.indexOf('.', pos + 1);
+    return version.substring(0, pos);
+  }
+
+  /**
+   * Clear out any side effects of running tests
+   */
+  public void clearPostTestEffects() throws Exception {
+    setup.postTest(conf);
+  }
+
+  public void clearKeysCreatedInTests() {
+    if (hes == null) {
+      return;
+    }
+    try {
+      for (String keyAlias : hes.getKeys()) {
+        hes.deleteKey(keyAlias);
+      }
+    } catch (IOException e) {
+      LOG.error("Fail to clean the keys created in test due to the error", e);
+    }
+  }
+
+  public void clearUDFsCreatedDuringTests() throws Exception {
+    if (System.getenv(QTEST_LEAVE_FILES) != null) {
+      return;
+    }
+    // Delete functions created by the tests
+    // It is enough to remove functions from the default database, other databases are dropped
+    for (String udfName : db.getFunctions(DEFAULT_DATABASE_NAME, ".*")) {
+      if (!srcUDFs.contains(udfName)) {
+        db.dropFunction(DEFAULT_DATABASE_NAME, udfName);
+      }
+    }
+  }
+
+  /**
+   * Clear out any side effects of running tests
+   */
+  public void clearTablesCreatedDuringTests() throws Exception {
+    if (System.getenv(QTEST_LEAVE_FILES) != null) {
+      return;
+    }
+
+    conf.set("hive.metastore.filter.hook",
+        "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl");
+    db = Hive.get(conf);
+
+    // First delete any MVs to avoid race conditions
+    for (String dbName : db.getAllDatabases()) {
+      SessionState.get().setCurrentDatabase(dbName);
+      for (String tblName : db.getAllTables()) {
+        Table tblObj = null;
+        try {
+          tblObj = db.getTable(tblName);
+        } catch (InvalidTableException e) {
+          LOG.warn("Trying to drop table " + e.getTableName() + ". But it does not exist.");
+          continue;
+        }
+        // only remove MVs first
+        if (!tblObj.isMaterializedView()) {
+          continue;
+        }
+        db.dropTable(dbName, tblName, true, true, fsType == FsType.encrypted_hdfs);
+      }
+    }
+
+    // Delete any tables other than the source tables
+    // and any databases other than the default database.
+    for (String dbName : db.getAllDatabases()) {
+      SessionState.get().setCurrentDatabase(dbName);
+      for (String tblName : db.getAllTables()) {
+        if (!DEFAULT_DATABASE_NAME.equals(dbName) || !srcTables.contains(tblName)) {
+          Table tblObj = null;
+          try {
+            tblObj = db.getTable(tblName);
+          } catch (InvalidTableException e) {
+            LOG.warn("Trying to drop table " + e.getTableName() + ". But it does not exist.");
+            continue;
+          }
+          // only remove MVs first
+          if (!tblObj.isMaterializedView()) {
+            continue;
+          }
+          db.dropTable(dbName, tblName, true, true, fsType == FsType.encrypted_hdfs);
+        }
+      }
+      if (!DEFAULT_DATABASE_NAME.equals(dbName)) {
+        // Drop cascade, functions dropped by cascade
+        db.dropDatabase(dbName, true, true, true);
+      }
+    }
+
+    // delete remaining directories for external tables (can affect stats for following tests)
+    try {
+      Path p = new Path(testWarehouse);
+      FileSystem fileSystem = p.getFileSystem(conf);
+      if (fileSystem.exists(p)) {
+        for (FileStatus status : fileSystem.listStatus(p)) {
+          if (status.isDirectory() && !srcTables.contains(status.getPath().getName())) {
+            fileSystem.delete(status.getPath(), true);
+          }
+        }
+      }
+    } catch (IllegalArgumentException e) {
+      // ignore.. provides invalid url sometimes intentionally
+    }
+    SessionState.get().setCurrentDatabase(DEFAULT_DATABASE_NAME);
+
+    List<String> roleNames = db.getAllRoleNames();
+    for (String roleName : roleNames) {
+      if (!"PUBLIC".equalsIgnoreCase(roleName) && !"ADMIN".equalsIgnoreCase(roleName)) {
+        db.dropRole(roleName);
+      }
+    }
+  }
+
+  /**
+   * Clear out any side effects of running tests
+   */
+  public void clearTestSideEffects() throws Exception {
+    if (System.getenv(QTEST_LEAVE_FILES) != null) {
+      return;
+    }
+
+    // Remove any cached results from the previous test.
+    QueryResultsCache.cleanupInstance();
+
+    // allocate and initialize a new conf since a test can
+    // modify conf by using 'set' commands
+    conf = new HiveConf(IDriver.class);
+    initConf();
+    initConfFromSetup();
+
+    // renew the metastore since the cluster type is unencrypted
+    db = Hive.get(conf);  // propagate new conf to meta store
+
+    clearTablesCreatedDuringTests();
+    clearUDFsCreatedDuringTests();
+    clearKeysCreatedInTests();
+  }
+
+  protected void initConfFromSetup() throws Exception {
+    setup.preTest(conf);
+  }
+
+  public void cleanUp() throws Exception {
+    cleanUp(null);
+  }
+
+  public void cleanUp(String tname) throws Exception {
+    boolean canReuseSession = (tname == null) || !qNoSessionReuseQuerySet.contains(tname);
+    if(!isSessionStateStarted) {
+      startSessionState(canReuseSession);
+    }
+    if (System.getenv(QTEST_LEAVE_FILES) != null) {
+      return;
+    }
+
+    clearTablesCreatedDuringTests();
+    clearUDFsCreatedDuringTests();
+    clearKeysCreatedInTests();
+
+    File cleanupFile = new File(cleanupScript);
+    if (cleanupFile.isFile()) {
+      String cleanupCommands = readEntireFileIntoString(cleanupFile);
+      LOG.info("Cleanup (" + cleanupScript + "):\n" + cleanupCommands);
+      if(cliDriver == null) {
+        cliDriver = new CliDriver();
+      }
+      SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", true);
+      int result = cliDriver.processLine(cleanupCommands);
+      if (result != 0) {
+        LOG.error("Failed during cleanup processLine with code={}. Ignoring", result);
+        // TODO Convert this to an Assert.fail once HIVE-14682 is fixed
+      }
+      SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", false);
+    } else {
+      LOG.info("No cleanup script detected. Skipping.");
+    }
+
+    // delete any contents in the warehouse dir
+    Path p = new Path(testWarehouse);
+    FileSystem fs = p.getFileSystem(conf);
+
+    try {
+      FileStatus [] ls = fs.listStatus(p);
+      for (int i=0; (ls != null) && (i<ls.length); i++) {
+        fs.delete(ls[i].getPath(), true);
+      }
+    } catch (FileNotFoundException e) {
+      // Best effort
+    }
+
+    // TODO: Clean up all the other paths that are created.
+
+    FunctionRegistry.unregisterTemporaryUDF("test_udaf");
+    FunctionRegistry.unregisterTemporaryUDF("test_error");
+  }
+
+  protected void runCreateTableCmd(String createTableCmd) throws Exception {
+    int ecode = 0;
+    ecode = drv.run(createTableCmd).getResponseCode();
+    if (ecode != 0) {
+      throw new Exception("create table command: " + createTableCmd
+          + " failed with exit code= " + ecode);
+    }
+
+    return;
+  }
+
+  protected void runCmd(String cmd) throws Exception {
+    int ecode = 0;
+    ecode = drv.run(cmd).getResponseCode();
+    drv.close();
+    if (ecode != 0) {
+      throw new Exception("command: " + cmd
+          + " failed with exit code= " + ecode);
+    }
+    return;
+  }
+
+  public void createSources() throws Exception {
+    createSources(null);
+  }
+
+  public void createSources(String tname) throws Exception {
+    boolean canReuseSession = (tname == null) || !qNoSessionReuseQuerySet.contains(tname);
+    if(!isSessionStateStarted) {
+      startSessionState(canReuseSession);
+    }
+
+    if(cliDriver == null) {
+      cliDriver = new CliDriver();
+    }
+    cliDriver.processLine("set test.data.dir=" + testFiles + ";");
+    File scriptFile = new File(this.initScript);
+    if (!scriptFile.isFile()) {
+      LOG.info("No init script detected. Skipping");
+      return;
+    }
+    conf.setBoolean("hive.test.init.phase", true);
+
+    String initCommands = readEntireFileIntoString(scriptFile);
+    LOG.info("Initial setup (" + initScript + "):\n" + initCommands);
+
+    int result = cliDriver.processLine(initCommands);
+    LOG.info("Result from cliDrriver.processLine in createSources=" + result);
+    if (result != 0) {
+      Assert.fail("Failed during createSources processLine with code=" + result);
+    }
+
+    conf.setBoolean("hive.test.init.phase", false);
+  }
+
+  public void init() throws Exception {
+
+    // Create remote dirs once.
+    if (mr != null) {
+      createRemoteDirs();
+    }
+
+    // Create views registry
+    HiveMaterializedViewsRegistry.get().init();
+
+    testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
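+    // The session is started with the MR engine; the originally configured engine is restored right after.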
+    String execEngine = conf.get("hive.execution.engine");
+    conf.set("hive.execution.engine", "mr");
+    SessionState.start(conf);
+    conf.set("hive.execution.engine", execEngine);
+    db = Hive.get(conf);
+    drv = DriverFactory.newDriver(conf);
+    pd = new ParseDriver();
+    sem = new SemanticAnalyzer(queryState);
+  }
+
+  public void init(String tname) throws Exception {
+    cleanUp(tname);
+    createSources(tname);
+    cliDriver.processCmd("set hive.cli.print.header=true;");
+  }
+
+  public void cliInit(String tname) throws Exception {
+    cliInit(tname, true);
+  }
+
+  public String cliInit(String tname, boolean recreate) throws Exception {
+    if (recreate) {
+      cleanUp(tname);
+      createSources(tname);
+    }
+
+    HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
+    "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
+    Utilities.clearWorkMap(conf);
+    CliSessionState ss = new CliSessionState(conf);
+    assert ss != null;
+    ss.in = System.in;
+
+    String outFileExtension = getOutFileExtension(tname);
+    String stdoutName = null;
+    if (outDir != null) {
+      // TODO: why is this needed?
+      File qf = new File(outDir, tname);
+      stdoutName = qf.getName().concat(outFileExtension);
+    } else {
+      stdoutName = tname + outFileExtension;
+    }
+
+    File outf = new File(logDir, stdoutName);
+    OutputStream fo = new BufferedOutputStream(new FileOutputStream(outf));
+    if (qSortQuerySet.contains(tname)) {
+      ss.out = new SortPrintStream(fo, "UTF-8");
+    } else if (qHashQuerySet.contains(tname)) {
+      ss.out = new DigestPrintStream(fo, "UTF-8");
+    } else if (qSortNHashQuerySet.contains(tname)) {
+      ss.out = new SortAndDigestPrintStream(fo, "UTF-8");
+    } else {
+      ss.out = new PrintStream(fo, true, "UTF-8");
+    }
+    ss.err = new CachingPrintStream(fo, true, "UTF-8");
+    ss.setIsSilent(true);
+    SessionState oldSs = SessionState.get();
+
+    boolean canReuseSession = !qNoSessionReuseQuerySet.contains(tname);
+    if (oldSs != null && canReuseSession && clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
+      // Copy the tezSessionState from the old CliSessionState.
+      TezSessionState tezSessionState = oldSs.getTezSession();
+      oldSs.setTezSession(null);
+      ss.setTezSession(tezSessionState);
+      oldSs.close();
+    }
+
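+    // Similarly, hand the Spark session over from the previous CliSessionState so it can be reused.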
+    if (oldSs != null && clusterType.getCoreClusterType() == CoreClusterType.SPARK) {
+      sparkSession = oldSs.getSparkSession();
+      ss.setSparkSession(sparkSession);
+      oldSs.setSparkSession(null);
+      oldSs.close();
+    }
+
+    if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
+      oldSs.out.close();
+    }
+    if (oldSs != null) {
+      oldSs.close();
+    }
+    SessionState.start(ss);
+
+    cliDriver = new CliDriver();
+
+    if (tname.equals("init_file.q")) {
+      ss.initFiles.add(HIVE_ROOT + "/data/scripts/test_init_file.sql");
+    }
+    cliDriver.processInitFiles(ss);
+
+    return outf.getAbsolutePath();
+  }
+
+  private CliSessionState startSessionState(boolean canReuseSession)
+      throws IOException {
+
+    HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
+        "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
+
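+    // Force the MR engine while the CliSessionState is created; the configured engine is restored at the end of this method.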
+    String execEngine = conf.get("hive.execution.engine");
+    conf.set("hive.execution.engine", "mr");
+    CliSessionState ss = new CliSessionState(conf);
+    assert ss != null;
+    ss.in = System.in;
+    ss.out = System.out;
+    ss.err = System.out;
+
+    SessionState oldSs = SessionState.get();
+    if (oldSs != null && canReuseSession && clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
+      // Copy the tezSessionState from the old CliSessionState.
+      TezSessionState tezSessionState = oldSs.getTezSession();
+      ss.setTezSession(tezSessionState);
+      oldSs.setTezSession(null);
+      oldSs.close();
+    }
+
+    if (oldSs != null && clusterType.getCoreClusterType() == CoreClusterType.SPARK) {
+      sparkSession = oldSs.getSparkSession();
+      ss.setSparkSession(sparkSession);
+      oldSs.setSparkSession(null);
+      oldSs.close();
+    }
+    if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
+      oldSs.out.close();
+    }
+    if (oldSs != null) {
+      oldSs.close();
+    }
+    SessionState.start(ss);
+
+    isSessionStateStarted = true;
+
+    conf.set("hive.execution.engine", execEngine);
+    return ss;
+  }
+
+  public int executeAdhocCommand(String q) {
+    if (!q.contains(";")) {
+      return -1;
+    }
+
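+    // Only the first statement (up to and including the first semicolon) is executed.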
+    String q1 = q.split(";")[0] + ";";
+
+    LOG.debug("Executing " + q1);
+    return cliDriver.processLine(q1);
+  }
+
+  public int executeOne(String tname) {
+    String q = qMap.get(tname);
+
+    if (q.indexOf(";") == -1) {
+      return -1;
+    }
+
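+    // Consume the first statement and put the remainder back so repeated calls step through the file.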
+    String q1 = q.substring(0, q.indexOf(";") + 1);
+    String qrest = q.substring(q.indexOf(";") + 1);
+    qMap.put(tname, qrest);
+
+    System.out.println("Executing " + q1);
+    return cliDriver.processLine(q1);
+  }
+
+  public int execute(String tname) {
+    return drv.run(qMap.get(tname)).getResponseCode();
+  }
+
+  public int executeClient(String tname1, String tname2) {
+    String commands = getCommand(tname1) + CRLF + getCommand(tname2);
+    return executeClientInternal(commands);
+  }
+
+  public int executeClient(String tname) {
+    return executeClientInternal(getCommand(tname));
+  }
+
+  private int executeClientInternal(String commands) {
+    List<String> cmds = CliDriver.splitSemiColon(commands);
+    int rc = 0;
+
+    String command = "";
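+    // Re-assemble statements: fragments ending with a backslash carry an escaped semicolon and are continued,
+    // Hive commands start a fresh buffer, and test-only commands are dispatched to executeTestCommand().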
+    for (String oneCmd : cmds) {
+      if (StringUtils.endsWith(oneCmd, "\\")) {
+        command += StringUtils.chop(oneCmd) + "\\;";
+        continue;
+      } else {
+        if (isHiveCommand(oneCmd)) {
+          command = oneCmd;
+        } else {
+          command += oneCmd;
+        }
+      }
+      if (StringUtils.isBlank(command)) {
+        continue;
+      }
+
+      if (isCommandUsedForTesting(command)) {
+        rc = executeTestCommand(command);
+      } else {
+        rc = cliDriver.processLine(command);
+      }
+
+      if (rc != 0 && !ignoreErrors()) {
+        break;
+      }
+      command = "";
+    }
+    if (rc == 0 && SessionState.get() != null) {
+      SessionState.get().setLastCommand(null);  // reset
+    }
+    return rc;
+  }
+
+  /**
+   * This allows a .q file to continue executing after a statement runs into an error, which is convenient
+   * if you want to use another Hive command after the failure to sanity-check the state of the system.
+   */
+  private boolean ignoreErrors() {
+    return conf.getBoolVar(HiveConf.ConfVars.CLIIGNOREERRORS);
+  }
+
+  private boolean isHiveCommand(String command) {
+    String[] cmd = command.trim().split("\\s+");
+    if (HiveCommand.find(cmd) != null) {
+      return true;
+    } else if (HiveCommand.find(cmd, HiveCommand.ONLY_FOR_TESTING) != null) {
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  private int executeTestCommand(final String command) {
+    String commandName = command.trim().split("\\s+")[0];
+    String commandArgs = command.trim().substring(commandName.length());
+
+    if (commandArgs.endsWith(";")) {
+      commandArgs = StringUtils.chop(commandArgs);
+    }
+
+    // Replace ${hiveconf:hive.metastore.warehouse.dir} with the actual dir if it exists.
+    // We only want the absolute path, so strip the scheme and authority, such as hdfs://localhost:57145.
+    String wareHouseDir = SessionState.get().getConf().getVar(ConfVars.METASTOREWAREHOUSE)
+        .replaceAll("^[a-zA-Z]+://.*?:\\d+", "");
+    commandArgs = commandArgs.replaceAll("\\$\\{hiveconf:hive\\.metastore\\.warehouse\\.dir\\}",
+      wareHouseDir);
+
+    if (SessionState.get() != null) {
+      SessionState.get().setLastCommand(commandName + " " + commandArgs.trim());
+    }
+
+    enableTestOnlyCmd(SessionState.get().getConf());
+
+    try {
+      CommandProcessor proc = getTestCommand(commandName);
+      if (proc != null) {
+        CommandProcessorResponse response = proc.run(commandArgs.trim());
+
+        int rc = response.getResponseCode();
+        if (rc != 0) {
+          SessionState.getConsole().printError(response.toString(), response.getException() != null ?
+                  Throwables.getStackTraceAsString(response.getException()) : "");
+        }
+
+        return rc;
+      } else {
+        throw new RuntimeException("Could not get CommandProcessor for command: " + commandName);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException("Could not execute test command", e);
+    }
+  }
+
+  private CommandProcessor getTestCommand(final String commandName) throws SQLException {
+    HiveCommand testCommand = HiveCommand.find(new String[]{commandName}, HiveCommand.ONLY_FOR_TESTING);
+
+    if (testCommand == null) {
+      return null;
+    }
+
+    return CommandProcessorFactory
+      .getForHiveCommandInternal(new String[]{commandName}, SessionState.get().getConf(),
+        testCommand.isOnlyForTesting());
+  }
+
+  private void enableTestOnlyCmd(HiveConf conf){
+    StringBuilder securityCMDs = new StringBuilder(conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST));
+    for(String c : testOnlyCommands){
+      securityCMDs.append(",");
+      securityCMDs.append(c);
+    }
+    conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), securityCMDs.toString());
+  }
+
+  private boolean isCommandUsedForTesting(final String command) {
+    String commandName = command.trim().split("\\s+")[0];
+    HiveCommand testCommand = HiveCommand.find(new String[]{commandName}, HiveCommand.ONLY_FOR_TESTING);
+    return testCommand != null;
+  }
+
+  private String getCommand(String tname) {
+    String commands = qMap.get(tname);
+    StringBuilder newCommands = new StringBuilder(commands.length());
+    int lastMatchEnd = 0;
+    Matcher commentMatcher = Pattern.compile("^--.*$", Pattern.MULTILINE).matcher(commands);
+    // remove the comments
+    while (commentMatcher.find()) {
+      newCommands.append(commands.substring(lastMatchEnd, commentMatcher.start()));
+      lastMatchEnd = commentMatcher.end();
+    }
+    newCommands.append(commands.substring(lastMatchEnd, commands.length()));
+    commands = newCommands.toString();
+    return commands;
+  }
+
+  public boolean shouldBeSkipped(String tname) {
+    return qSkipSet.contains(tname);
+  }
+
+  private String getOutFileExtension(String fname) {
+    String outFileExtension = ".out";
+    if (qJavaVersionSpecificOutput.contains(fname)) {
+      outFileExtension = ".java" + javaVersion + ".out";
+    }
+
+    return outFileExtension;
+  }
+
+  public void convertSequenceFileToTextFile() throws Exception {
+    // Create an instance of hive in order to create the tables
+    testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
+    db = Hive.get(conf);
+
+    // Move all data from dest4_sequencefile to dest4
+    drv
+      .run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*");
+
+    // Drop dest4_sequencefile
+    db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, "dest4_sequencefile",
+        true, true);
+  }
+
+  public QTestProcessExecResult checkNegativeResults(String tname, Exception e) throws Exception {
+
+    String outFileExtension = getOutFileExtension(tname);
+
+    File qf = new File(outDir, tname);
+    String expf = outPath(outDir.toString(), tname.concat(outFileExtension));
+
+    File outf = null;
+    outf = new File(logDir);
+    outf = new File(outf, qf.getName().concat(outFileExtension));
+
+    FileWriter outfd = new FileWriter(outf);
+    if (e instanceof ParseException) {
+      outfd.write("Parse Error: ");
+    } else if (e instanceof SemanticException) {
+      outfd.write("Semantic Exception: \n");
+    } else {
+      throw e;
+    }
+
+    outfd.write(e.getMessage());
+    outfd.close();
+
+    QTestProcessExecResult result = executeDiffCommand(outf.getPath(), expf, false,
+                                     qSortSet.contains(qf.getName()));
+    if (overWrite) {
+      overwriteResults(outf.getPath(), expf);
+      return QTestProcessExecResult.createWithoutOutput(0);
+    }
+
+    return result;
+  }
+
+  public QTestProcessExecResult checkParseResults(String tname, ASTNode tree) throws Exception {
+
+    if (tree != null) {
+      String outFileExtension = getOutFileExtension(tname);
+
+      File parseDir = new File(outDir, "parse");
+      String expf = outPath(parseDir.toString(), tname.concat(outFileExtension));
+
+      File outf = null;
+      outf = new File(logDir);
+      outf = new File(outf, tname.concat(outFileExtension));
+
+      FileWriter outfd = new FileWriter(outf);
+      outfd.write(tree.toStringTree());
+      outfd.close();
+
+      QTestProcessExecResult exitVal = executeDiffCommand(outf.getPath(), expf, false, false);
+
+      if (overWrite) {
+        overwriteResults(outf.getPath(), expf);
+        return QTestProcessExecResult.createWithoutOutput(0);
+      }
+
+      return exitVal;
+    } else {
+      throw new Exception("Parse tree is null");
+    }
+  }
+
+  /**
+   * Given the current configurations (e.g., hadoop version and execution mode), return
+   * the correct file name to compare with the current test run output.
+   * @param outDir The directory where the reference log files are stored.
+   * @param testName The test file name (terminated by ".out").
+   * @return The file name appended with the configuration values if it exists.
+   */
+  public String outPath(String outDir, String testName) {
+    String ret = (new File(outDir, testName)).getPath();
+    // List of configurations. Currently the list consists of hadoop version and execution mode only
+    List<String> configs = new ArrayList<String>();
+    configs.add(this.clusterType.toString());
+    configs.add(this.hadoopVer);
+
+    Deque<String> stack = new LinkedList<String>();
+    StringBuilder sb = new StringBuilder();
+    sb.append(testName);
+    stack.push(sb.toString());
+
+    // example file names are input1.q.out_mr_0.17 or input2.q.out_0.17
+    for (String s: configs) {
+      sb.append('_');
+      sb.append(s);
+      stack.push(sb.toString());
+    }
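+    // Candidates are popped from most specific (all configs appended) down to the bare test name; the first existing file wins.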
+    while (stack.size() > 0) {
+      String fileName = stack.pop();
+      File f = new File(outDir, fileName);
+      if (f.exists()) {
+        ret = f.getPath();
+        break;
+      }
+    }
+    return ret;
+  }
+
+  private Pattern[] toPattern(String[] patternStrs) {
+    Pattern[] patterns = new Pattern[patternStrs.length];
+    for (int i = 0; i < patternStrs.length; i++) {
+      patterns[i] = Pattern.compile(patternStrs[i]);
+    }
+    return patterns;
+  }
+
+  private void maskPatterns(Pattern[] patterns, String fname) throws Exception {
+    String maskPattern = "#### A masked pattern was here ####";
+    String partialMaskPattern = "#### A PARTIAL masked pattern was here ####";
+
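+    // The file is copied to <fname>.orig and rewritten in place with the masks applied line by line;
+    // consecutive fully masked lines are folded into a single masked line.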
+    String line;
+    BufferedReader in;
+    BufferedWriter out;
+
+    File file = new File(fname);
+    File fileOrig = new File(fname + ".orig");
+    FileUtils.copyFile(file, fileOrig);
+
+    in = new BufferedReader(new InputStreamReader(new FileInputStream(fileOrig), "UTF-8"));
+    out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8"));
+
+    boolean lastWasMasked = false;
+    boolean partialMaskWasMatched = false;
+    Matcher matcher;
+    while (null != (line = in.readLine())) {
+      if (fsType == FsType.encrypted_hdfs) {
+        for (Pattern pattern : partialReservedPlanMask) {
+          matcher = pattern.matcher(line);
+          if (matcher.find()) {
+            line = partialMaskPattern + " " + matcher.group(0);
+            partialMaskWasMatched = true;
+            break;
+          }
+        }
+      }
+      else {
+        for (PatternReplacementPair prp : partialPlanMask) {
+          matcher = prp.pattern.matcher(line);
+          if (matcher.find()) {
+            line = line.replaceAll(prp.pattern.pattern(), prp.replacement);
+            partialMaskWasMatched = true;
+          }
+        }
+      }
+
+      if (!partialMaskWasMatched) {
+        for (Pair<Pattern, String> pair : patternsWithMaskComments) {
+          Pattern pattern = pair.getLeft();
+          String maskComment = pair.getRight();
+
+          matcher = pattern.matcher(line);
+          if (matcher.find()) {
+            line = matcher.replaceAll(maskComment);
+            partialMaskWasMatched = true;
+            break;
+          }
+        }
+
+        for (Pattern pattern : patterns) {
+          line = pattern.matcher(line).replaceAll(maskPattern);
+        }
+      }
+
+      if (line.equals(maskPattern)) {
+        // We're folding multiple masked lines into one.
+        if (!lastWasMasked) {
+          out.write(line);
+          out.write("\n");
+          lastWasMasked = true;
+          partialMaskWasMatched = false;
+        }
+      } else {
+        out.write(line);
+        out.write("\n");
+        lastWasMasked = false;
+        partialMaskWasMatched = false;
+      }
+    }
+
+    in.close();
+    out.close();
+  }
+
+  private final Pattern[] planMask = toPattern(new String[] {
+      ".*file:.*",
+      ".*pfile:.*",
+      ".*/tmp/.*",
+      ".*invalidscheme:.*",
+      ".*lastUpdateTime.*",
+      ".*lastAccessTime.*",
+      ".*lastModifiedTime.*",
+      ".*[Oo]wner.*",
+      ".*CreateTime.*",
+      ".*LastAccessTime.*",
+      ".*Location.*",
+      ".*LOCATION '.*",
+      ".*transient_lastDdlTime.*",
+      ".*last_modified_.*",
+      ".*at org.*",
+      ".*at sun.*",
+      ".*at java.*",
+      ".*at junit.*",
+      ".*Caused by:.*",
+      ".*LOCK_QUERYID:.*",
+      ".*LOCK_TIME:.*",
+      ".*grantTime.*",
+      ".*[.][.][.] [0-9]* more.*",
+      ".*job_[0-9_]*.*",
+      ".*job_local[0-9_]*.*",
+      ".*USING 'java -cp.*",
+      "^Deleted.*",
+      ".*DagName:.*",
+      ".*DagId:.*",
+      ".*Input:.*/data/files/.*",
+      ".*Output:.*/data/files/.*",
+      ".*total number of created files now is.*",
+      ".*.hive-staging.*",
+      ".*Warning.*",
+      "pk_-?[0-9]*_[0-9]*_[0-9]*",
+      "fk_-?[0-9]*_[0-9]*_[0-9]*",
+      "uk_-?[0-9]*_[0-9]*_[0-9]*",
+      "nn_-?[0-9]*_[0-9]*_[0-9]*",
+      ".*at com\\.sun\\.proxy.*",
+      ".*at com\\.jolbox.*",
+      ".*at com\\.zaxxer.*",
+      "org\\.apache\\.hadoop\\.hive\\.metastore\\.model\\.MConstraint@([0-9]|[a-z])*",
+      "^Repair: Added partition to metastore.*"
+  });
+
+  private final Pattern[] partialReservedPlanMask = toPattern(new String[] {
+      "data/warehouse/(.*?/)+\\.hive-staging"  // the directory might be db/table/partition
+      //TODO: add more expected test result here
+  });
+  /**
+   * Pattern to match and (partial) replacement text.
+   * For example, {"transaction":76,"bucketid":8249877}.  We just want to mask 76 but a regex that
+   * matches just 76 will match a lot of other things.
+   */
+  private final static class PatternReplacementPair {
+    private final Pattern pattern;
+    private final String replacement;
+    PatternReplacementPair(Pattern p, String r) {
+      pattern = p;
+      replacement = r;
+    }
+  }
+  private final PatternReplacementPair[] partialPlanMask;
+  {
+    ArrayList<PatternReplacementPair> ppm = new ArrayList<>();
+    ppm.add(new PatternReplacementPair(Pattern.compile("\\{\"transactionid\":[1-9][0-9]*,\"bucketid\":"),
+      "{\"transactionid\":### Masked txnid ###,\"bucketid\":"));
+
+    ppm.add(new PatternReplacementPair(Pattern.compile("attempt_[0-9]+"), "attempt_#ID#"));
+    ppm.add(new PatternReplacementPair(Pattern.compile("vertex_[0-9_]+"), "vertex_#ID#"));
+    ppm.add(new PatternReplacementPair(Pattern.compile("task_[0-9_]+"), "task_#ID#"));
+    partialPlanMask = ppm.toArray(new PatternReplacementPair[ppm.size()]);
+  }
+  /* This list may be modified by specific cli drivers to mask strings that change on every test */
+  private final List<Pair<Pattern, String>> patternsWithMaskComments =
+      new ArrayList<Pair<Pattern, String>>() {
+        {
+          add(toPatternPair("(pblob|s3.?|swift|wasb.?).*hive-staging.*",
+              "### BLOBSTORE_STAGING_PATH ###"));
+          add(toPatternPair(PATH_HDFS_WITH_DATE_USER_GROUP_REGEX,
+              "### USER ### ### GROUP ###$3$4 ### HDFS DATE ### $6### HDFS PATH ###"));
+          add(toPatternPair(PATH_HDFS_REGEX, "$1### HDFS PATH ###"));
+        }
+      };
+
+  private Pair<Pattern, String> toPatternPair(String patternStr, String maskComment) {
+    return ImmutablePair.of(Pattern.compile(patternStr), maskComment);
+  }
+
+  public void addPatternWithMaskComment(String patternStr, String maskComment) {
+    patternsWithMaskComments.add(toPatternPair(patternStr, maskComment));
+  }
+
+  public QTestProcessExecResult checkCliDriverResults(String tname) throws Exception {
+    assert(qMap.containsKey(tname));
+
+    String outFileExtension = getOutFileExtension(tname);
+    String outFileName = outPath(outDir, tname + outFileExtension);
+
+    File f = new File(logDir, tname + outFileExtension);
+
+    maskPatterns(planMask, f.getPath());
+    QTestProcessExecResult exitVal = executeDiffCommand(f.getPath(),
+                                     outFileName, false,
+                                     qSortSet.contains(tname));
+
+    if (overWrite) {
+      overwriteResults(f.getPath(), outFileName);
+      return QTestProcessExecResult.createWithoutOutput(0);
+    }
+
+    return exitVal;
+  }
+
+
+  public QTestProcessExecResult checkCompareCliDriverResults(String tname, List<String> outputs)
+      throws Exception {
+    assert outputs.size() > 1;
+    maskPatterns(planMask, outputs.get(0));
+    for (int i = 1; i < outputs.size(); ++i) {
+      maskPatterns(planMask, outputs.get(i));
+      QTestProcessExecResult result = executeDiffCommand(
+          outputs.get(i - 1), outputs.get(i), false, qSortSet.contains(tname));
+      if (result.getReturnCode() != 0) {
+        System.out.println("Files don't match: " + outputs.get(i - 1) + " and " + outputs.get(i));
+        return result;
+      }
+    }
+    return QTestProcessExecResult.createWithoutOutput(0);
+  }
+
+  private static void overwriteResults(String inFileName, String outFileName) throws Exception {
+    // This method can be replaced with Files.copy(source, target, REPLACE_EXISTING)
+    // once Hive uses Java 7.
+    System.out.println("Overwriting results " + inFileName + " to " + outFileName);
+    int result = executeCmd(new String[]{
+        "cp",
+        getQuotedString(inFileName),
+        getQuotedString(outFileName)
+    }).getReturnCode();
+    if (result != 0) {
+      throw new IllegalStateException("Unexpected error while overwriting " +
+          inFileName + " with " + outFileName);
+    }
+  }
+
+  private static QTestProcessExecResult executeDiffCommand(String inFileName,
+                                                           String outFileName,
+                                                           boolean ignoreWhiteSpace,
+                                                           boolean sortResults
+      ) throws Exception {
+
+    QTestProcessExecResult result;
+
+    if (sortResults) {
+      // sort will try to open the output file in write mode on Windows. We need to
+      // close it first.
+      SessionState ss = SessionState.get();
+      if (ss != null && ss.out != null && ss.out != System.out) {
+        ss.out.close();
+      }
+
+      String inSorted = inFileName + SORT_SUFFIX;
+      String outSorted = outFileName + SORT_SUFFIX;
+
+      sortFiles(inFileName, inSorted);
+      sortFiles(outFileName, outSorted);
+
+      inFileName = inSorted;
+      outFileName = outSorted;
+    }
+
+    ArrayList<String> diffCommandArgs = new ArrayList<String>();
+    diffCommandArgs.add("diff");
+
+    // Text file comparison
+    diffCommandArgs.add("-a");
+
+    // Ignore changes in the amount of white space
+    if (ignoreWhiteSpace) {
+      diffCommandArgs.add("-b");
+    }
+
+    // Add files to compare to the arguments list
+    diffCommandArgs.add(getQuotedString(inFileName));
+    diffCommandArgs.add(getQuotedString(outFileName));
+
+    result = executeCmd(diffCommandArgs);
+
+    if (sortResults) {
+      new File(inFileName).delete();
+      new File(outFileName).delete();
+    }
+
+    return result;
+  }
+
+  private static void sortFiles(String in, String out) throws Exception {
+    int result = executeCmd(new String[]{
+        "sort",
+        getQuotedString(in),
+    }, out, null).getReturnCode();
+    if (result != 0) {
+      throw new IllegalStateException("Unexpected error while sorting " + in);
+    }
+  }
+
+  private static QTestProcessExecResult executeCmd(Collection<String> args) throws Exception {
+    return executeCmd(args, null, null);
+  }
+
+  private static QTestProcessExecResult executeCmd(String[] args) throws Exception {
+    return executeCmd(args, null, null);
+  }
+
+  private static QTestProcessExecResult executeCmd(Collection<String> args, String outFile,
+                                                   String errFile) throws Exception {
+    String[] cmdArray = args.toArray(new String[args.size()]);
+    return executeCmd(cmdArray, outFile, errFile);
+  }
+
+  private static QTestProcessExecResult executeCmd(String[] args, String outFile,
+                                                   String errFile) throws Exception {
+    System.out.println("Running: " + org.apache.commons.lang.StringUtils.join(args, ' '));
+
+    PrintStream out = outFile == null ?
+      SessionState.getConsole().getChildOutStream() :
+      new PrintStream(new FileOutputStream(outFile), true, "UTF-8");
+    PrintStream err = errFile == null ?
+      SessionState.getConsole().getChildErrStream() :
+      new PrintStream(new FileOutputStream(errFile), true, "UTF-8");
+
+    Process executor = Runtime.getRuntime().exec(args);
+
+    ByteArrayOutputStream bos = new ByteArrayOutputStream();
+    PrintStream str = new PrintStream(bos, true, "UTF-8");
+
+    StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, err);
+    StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, out, str);
+
+    outPrinter.start();
+    errPrinter.start();
+
+    int result = executor.waitFor();
+
+    outPrinter.join();
+    errPrinter.join();
+
+    if (outFile != null) {
+      out.close();
+    }
+
+    if (errFile != null) {
+      err.close();
+    }
+
+    return QTestProcessExecResult.
+        create(result, new String(bos.toByteArray(), StandardCharsets.UTF_8));
+  }
+
+  private static String getQuotedString(String str){
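+    // No quoting is applied: arguments are handed to Runtime.exec() as an array, not through a shell.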
+    return str;
+  }
+
+  public ASTNode parseQuery(String tname) throws Exception {
+    return pd.parse(qMap.get(tname));
+  }
+
+  public void resetParser() throws SemanticException {
+    pd = new ParseDriver();
+    queryState = new QueryState.Builder().withHiveConf(conf).build();
+    sem = new SemanticAnalyzer(queryState);
+  }
+
+
+  public List<Task<? extends Serializable>> analyzeAST(ASTNode ast) throws Exception {
+
+    // Do semantic analysis and plan generation
+    Context ctx = new Context(conf);
+    while ((ast.getToken() == null) && (ast.getChildCount() > 0)) {
+      ast = (ASTNode) ast.getChild(0);
+    }
+    sem.getOutputs().clear();
+    sem.getInputs().clear();
+    sem.analyze(ast, ctx);
+    ctx.clear();
+    return sem.getRootTasks();
+  }
+
+  public TreeMap<String, String> getQMap() {
+    return qMap;
+  }
+
+  /**
+   * QTestSetup defines test fixtures which are reused across testcases,
+   * and are needed before any test can be run
+   */
+  public static class QTestSetup
+  {
+    private MiniZooKeeperCluster zooKeeperCluster = null;
+    private int zkPort;
+    private ZooKeeper zooKeeper;
+
+    public QTestSetup() {
+    }
+
+    public void preTest(HiveConf conf) throws Exception {
+
+      if (zooKeeperCluster == null) {
+        //create temp dir
+        String tmpBaseDir =  System.getProperty(TEST_TMP_DIR_PROPERTY);
+        File tmpDir = Utilities.createTempDir(tmpBaseDir);
+
+        zooKeeperCluster = new MiniZooKeeperCluster();
+        zkPort = zooKeeperCluster.startup(tmpDir);
+      }
+
+      if (zooKeeper != null) {
+        zooKeeper.close();
+      }
+
+      int sessionTimeout =  (int) conf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT, TimeUnit.MILLISECONDS);
+      zooKeeper = new ZooKeeper("localhost:" + zkPort, sessionTimeout, new Watcher() {
+        @Override
+        public void process(WatchedEvent arg0) {
+        }
+      });
+
+      String zkServer = "localhost";
+      conf.set("hive.zookeeper.quorum", zkServer);
+      conf.set("hive.zookeeper.client.port", "" + zkPort);
+    }
+
+    public void postTest(HiveConf conf) throws Exception {
+      if (zooKeeperCluster == null) {
+        return;
+      }
+
+      if (zooKeeper != null) {
+        zooKeeper.close();
+      }
+
+      ZooKeeperHiveLockManager.releaseAllLocks(conf);
+    }
+
+    public void tearDown() throws Exception {
+      CuratorFrameworkSingleton.closeAndReleaseInstance();
+
+      if (zooKeeperCluster != null) {
+        zooKeeperCluster.shutdown();
+        zooKeeperCluster = null;
+      }
+    }
+  }
+
+  /**
+   * QTRunner: Runnable class for running a single query file.
+   *
+   **/
+  public static class QTRunner implements Runnable {
+    private final QTestUtil qt;
+    private final String fname;
+
+    public QTRunner(QTestUtil qt, String fname) {
+      this.qt = qt;
+      this.fname = fname;
+    }
+
+    @Override
+    public void run() {
+      try {
+        // assumption is that environment has already been cleaned once globally
+        // hence each thread does not call cleanUp() and createSources() again
+        qt.cliInit(fname, false);
+        qt.executeClient(fname);
+      } catch (Throwable e) {
+        System.err.println("Query file " + fname + " failed with exception "
+            + e.getMessage());
+        e.printStackTrace();
+        outputTestFailureHelpMessage();
+      }
+    }
+  }
+
+  /**
+   * Setup to execute a set of query files. Uses QTestUtil to do so.
+   *
+   * @param qfiles
+   *          array of input query files containing arbitrary number of hive
+   *          queries
+   * @param resDir
+   *          output directory
+   * @param logDir
+   *          log directory
+   * @return one QTestUtil for each query file
+   */
+  public static QTestUtil[] queryListRunnerSetup(File[] qfiles, String resDir,
+                                                 String logDir, String initScript,
+                                                 String cleanupScript) throws Exception
+  {
+    QTestUtil[] qt = new QTestUtil[qfiles.length];
+    for (int i = 0; i < qfiles.length; i++) {
+      qt[i] = new QTestUtil(resDir, logDir, MiniClusterType.none, null, "0.20",
+        initScript == null ? defaultInitScript : initScript,
+        cleanupScript == null ? defaultCleanupScript : cleanupScript, false);
+      qt[i].addFile(qfiles[i]);
+      qt[i].clearTestSideEffects();
+    }
+
+    return qt;
+  }
+
+  /**
+   * Executes a set of query files in sequence.
+   *
+   * @param qfiles
+   *          array of input query files containing arbitrary number of hive
+   *          queries
+   * @param qt
+   *          array of QTestUtils, one per qfile
+   * @return true if all queries passed, false otherwise
+   */
+  public static boolean queryListRunnerSingleThreaded(File[] qfiles, QTestUtil[] qt)
+    throws Exception
+  {
+    boolean failed = false;
+    qt[0].cleanUp();
+    qt[0].createSources();
+    for (int i = 0; i < qfiles.length && !failed; i++) {
+      qt[i].clearTestSideEffects();
+      qt[i].cliInit(qfiles[i].getName(), false);
+      qt[i].executeClient(qfiles[i].getName());
+      QTestProcessExecResult result = qt[i].checkCliDriverResults(qfiles[i].getName());
+      if (result.getReturnCode() != 0) {
+        failed = true;
+        StringBuilder builder = new StringBuilder();
+        builder.append("Test ")
+            .append(qfiles[i].getName())
+            .append(" results check failed with error code ")
+            .append(result.getReturnCode());
+        if (Strings.isNotEmpty(result.getCapturedOutput())) {
+          builder.append(" and diff value ").append(result.getCapturedOutput());
+        }
+        System.err.println(builder.toString());
+        outputTestFailureHelpMessage();
+      }
+      qt[i].clearPostTestEffects();
+    }
+    return (!failed);
+  }
+
+  /**
+   * Executes a set of query files in parallel.
+   *
+   * Each query file is run in a separate thread. The caller has to arrange
+   * that different query files do not collide (in terms of destination tables)
+   *
+   * @param qfiles
+   *          array of input query files containing arbitrary number of hive
+   *          queries
+   * @param qt
+   *          array of QTestUtils, one per qfile
+   * @return true if all queries passed, false otherwise
+   *
+   */
+  public static boolean queryListRunnerMultiThreaded(File[] qfiles, QTestUtil[] qt)
+    throws Exception
+  {
+    boolean failed = false;
+
+    // in multithreaded mode - do cleanup/initialization just once
+
+    qt[0].cleanUp();
+    qt[0].createSources();
+    qt[0].clearTestSideEffects();
+
+    QTRunner[] qtRunners = new QTRunner[qfiles.length];
+    Thread[] qtThread = new Thread[qfiles.length];
+
+    for (int i = 0; i < qfiles.length; i++) {
+      qtRunners[i] = new QTRunner(qt[i], qfiles[i].getName());
+      qtThread[i] = new Thread(qtRunners[i]);
+    }
+
+    for (int i = 0; i < qfiles.length; i++) {
+      qtThread[i].start();
+    }
+
+    for (int i = 0; i < qfiles.length; i++) {
+      qtThread[i].join();
+      QTestProcessExecResult result = qt[i].checkCliDriverResults(qfiles[i].getName());
+      if (result.getReturnCode() != 0) {
+        failed = true;
+        StringBuilder builder = new StringBuilder();
+        builder.append("Test ")
+            .append(qfiles[i].getName())
+            .append(" results check failed with error code ")
+            .append(result.getReturnCode());
+        if (Strings.isNotEmpty(result.getCapturedOutput())) {
+          builder.append(" and diff value ").append(result.getCapturedOutput());
+        }
+        System.err.println(builder.toString());
+        outputTestFailureHelpMessage();
+      }
+    }
+    return (!failed);
+  }
+
+  public static void outputTestFailureHelpMessage() {
+    System.err.println(
+      "See ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, or check " +
+        "./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific " +
+        "test cases logs.");
+    System.err.flush();
+  }
+
+  private static String[] cachedQvFileList = null;
+  private static ImmutableList<String> cachedDefaultQvFileList = null;
+  private static Pattern qvSuffix = Pattern.compile("_[0-9]+.qv$", Pattern.CASE_INSENSITIVE);
+
+  public static List<String> getVersionFiles(String queryDir, String tname) {
+    ensureQvFileList(queryDir);
+    List<String> result = getVersionFilesInternal(tname);
+    if (result == null) {
+      result = cachedDefaultQvFileList;
+    }
+    return result;
+  }
+
+  private static void ensureQvFileList(String queryDir) {
+    if (cachedQvFileList != null) {
+      return;
+    }
+    // Not thread-safe.
+    System.out.println("Getting versions from " + queryDir);
+    cachedQvFileList = (new File(queryDir)).list(new FilenameFilter() {
+      @Override
+      public boolean accept(File dir, String name) {
+        return name.toLowerCase().endsWith(".qv");
+      }
+    });
+    if (cachedQvFileList == null) {
+      return; // no files at all
+    }
+    Arrays.sort(cachedQvFileList, String.CASE_INSENSITIVE_ORDER);
+    List<String> defaults = getVersionFilesInternal("default");
+    cachedDefaultQvFileList = (defaults != null)
+        ? ImmutableList.copyOf(defaults) : ImmutableList.<String>of();
+  }
+
+  private static List<String> getVersionFilesInternal(String tname) {
+    if (cachedQvFileList == null) {
+      return new ArrayList<String>();
+    }
+    int pos = Arrays.binarySearch(cachedQvFileList, tname, String.CASE_INSENSITIVE_ORDER);
+    if (pos >= 0) {
+      throw new BuildException("Unexpected file list element: " + cachedQvFileList[pos]);
+    }
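+    // binarySearch returns a negative insertion point here (tname itself is not a .qv file name);
+    // scan forward from it for files of the form <tname>_<n>.qv.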
+    List<String> result = null;
+    for (pos = (-pos - 1); pos < cachedQvFileList.length; ++pos) {
+      String candidate = cachedQvFileList[pos];
+      if (candidate.length() <= tname.length()
+          || !tname.equalsIgnoreCase(candidate.substring(0, tname.length()))
+          || !qvSuffix.matcher(candidate.substring(tname.length())).matches()) {
+        break;
+      }
+      if (result == null) {
+        result = new ArrayList<String>();
+      }
+      result.add(candidate);
+    }
+    return result;
+  }
+
+  public void failed(int ecode, String fname, String debugHint) {
+    String command = SessionState.get() != null ? SessionState.get().getLastCommand() : null;
+    String message = "Client execution failed with error code = " + ecode +
+        (command != null ? " running \"" + command + "\"" : "") + " fname=" + fname + " " +
+        (debugHint != null ? debugHint : "");
+    LOG.error(message);
+    Assert.fail(message);
+  }
+
+  // For negative tests that succeeded; no need to print the query string.
+  public void failed(String fname, String debugHint) {
+    Assert.fail(
+        "Client Execution was expected to fail, but succeeded with error code 0 for fname=" +
+            fname + (debugHint != null ? (" " + debugHint) : ""));
+  }
+
+  public void failedDiff(int ecode, String fname, String debugHint) {
+    String message =
+        "Client Execution succeeded but contained differences " +
+            "(error code = " + ecode + ") after executing " +
+            fname + (debugHint != null ? (" " + debugHint) : "");
+    LOG.error(message);
+    Assert.fail(message);
+  }
+
+  public void failed(Exception e, String fname, String debugHint) {
+    String command = SessionState.get() != null ? SessionState.get().getLastCommand() : null;
+    System.err.println("Failed query: " + fname);
+    System.err.flush();
+    Assert.fail("Unexpected exception " +
+        org.apache.hadoop.util.StringUtils.stringifyException(e) + "\n" +
+        (command != null ? " running " + command : "") +
+        (debugHint != null ? debugHint : ""));
+  }
+
+  public static void addTestsToSuiteFromQfileNames(
+    String qFileNamesFile,
+    Set<String> qFilesToExecute,
+    TestSuite suite,
+    Object setup,
+    SuiteAddTestFunctor suiteAddTestCallback) {
+    try {
+      File qFileNames = new File(qFileNamesFile);
+      FileReader fr = new FileReader(qFileNames.getCanonicalFile());
+      BufferedReader br = new BufferedReader(fr);
+      String fName = null;
+
+      while ((fName = br.readLine()) != null) {
+        if (fName.isEmpty() || fName.trim().equals("")) {
+          continue;
+        }
+
+        int eIdx = fName.indexOf('.');
+
+        if (eIdx == -1) {
+          continue;
+        }
+
+        String tName = fName.substring(0, eIdx);
+
+        if (qFilesToExecute.isEmpty() || qFilesToExecute.contains(fName)) {
+          suiteAddTestCallback.addTestToSuite(suite, setup, tName);
+        }
+      }
+      br.close();
+    } catch (Exception e) {
+      Assert.fail("Unexpected exception " + org.apache.hadoop.util.StringUtils.stringifyException(e));
+    }
+  }
+
+  public static void setupMetaStoreTableColumnStatsFor30TBTPCDSWorkload(HiveConf conf) {
+    Connection conn = null;
+    ArrayList<Statement> statements = new ArrayList<Statement>(); // list of Statements, PreparedStatements
+
+    try {
+      Properties props = new Properties(); // connection properties
+      props.put("user", conf.get("javax.jdo.option.ConnectionUserName"));
+      props.put("password", conf.get("javax.jdo.option.ConnectionPassword"));
+      conn = DriverManager.getConnection(conf.get("javax.jdo.option.ConnectionURL"), props);
+      ResultSet rs = null;
+      Statement s = conn.createStatement();
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Connected to metastore database ");
+      }
+
+      String mdbPath = HIVE_ROOT + "/data/files/tpcds-perf/metastore_export/";
+
+      // Setup the table column stats
+      BufferedReader br = new BufferedReader(
+          new FileReader(
+              new File(HIVE_ROOT + "/metastore/scripts/upgrade/derby/022-HIVE-11107.derby.sql")));
+      String command;
+
+      s.execute("DROP TABLE APP.TABLE_PARAMS");
+      s.execute("DROP TABLE APP.TAB_COL_STATS");
+      // Create the column stats table
+      while ((command = br.readLine()) != null) {
+        if (!command.endsWith(";")) {
+          continue;
+        }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Going to run command : " + command);
+        }
+        try {
+          PreparedStatement psCommand = conn.prepareStatement(command.substring(0, command.length()-1));
+          statements.add(psCommand);
+          psCommand.execute();
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("successfully completed " + command);
+          }
+        } catch (SQLException e) {
+          LOG.info("Got SQL Exception " + e.getMessage());
+        }
+      }
+      br.close();
+
+      java.nio.file.Path tabColStatsCsv = FileSystems.getDefault().getPath(mdbPath, "csv" ,"TAB_COL_STATS.txt.bz2");
+      java.nio.file.Path tabParamsCsv = FileSystems.getDefault().getPath(mdbPath, "csv", "TABLE_PARAMS.txt.bz2");
+
+      // Set up the foreign key constraints properly in the TAB_COL_STATS data
+      String tmpBaseDir =  System.getProperty(TEST_TMP_DIR_PROPERTY);
+      java.nio.file.Path tmpFileLoc1 = FileSystems.getDefault().getPath(tmpBaseDir, "TAB_COL_STATS.txt");
+      java.nio.file.Path tmpFileLoc2 = FileSystems.getDefault().getPath(tmpBaseDir, "TABLE_PARAMS.txt");
+
+      class MyComp implements Comparator<String> {
+        @Override
+        public int compare(String str1, String str2) {
+          if (str2.length() != str1.length()) {
+            return str2.length() - str1.length();
+          }
+          return str1.compareTo(str2);
+        }
+      }
+
+      final SortedMap<String, Integer> tableNameToID = new TreeMap<String, Integer>(new MyComp());
+
+      rs = s.executeQuery("SELECT * FROM APP.TBLS");
+      while(rs.next()) {
+        String tblName = rs.getString("TBL_NAME");
+        Integer tblId = rs.getInt("TBL_ID");
+        tableNameToID.put(tblName, tblId);
+
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Resultset : " +  tblName + " | " + tblId);
+        }
+      }
+
+      final Map<String, Map<String, String>> data = new HashMap<>();
+      rs = s.executeQuery("select TBLS.TBL_NAME, a.COLUMN_NAME, a.TYPE_NAME from  "
+          + "(select COLUMN_NAME, TYPE_NAME, SDS.SD_ID from APP.COLUMNS_V2 join APP.SDS on SDS.CD_ID = COLUMNS_V2.CD_ID) a"
+          + " join APP.TBLS on  TBLS.SD_ID = a.SD_ID");
+      while (rs.next()) {
+        String tblName = rs.getString(1);
+        String colName = rs.getString(2);
+        String typeName = rs.getString(3);
+        Map<String, String> cols = data.get(tblName);
+        if (null == cols) {
+          cols = new HashMap<>();
+        }
+        cols.put(colName, typeName);
+        data.put(tblName, cols);
+      }
+
+      BufferedReader reader = new BufferedReader(new InputStreamReader(
+        new BZip2CompressorInputStream(Files.newInputStream(tabColStatsCsv, StandardOpenOption.READ))));
+
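+      // Rewrite each TAB_COL_STATS row as default@<table>@<column>@<type>@...@<tbl_id>@ so Derby's
+      // SYSCS_UTIL.SYSCS_IMPORT_TABLE can load it with '@' as the column delimiter.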
+      Stream<String> replaced = reader.lines().parallel().map(str-> {
+        String[] splits = str.split(",");
+        String tblName = splits[0];
+        String colName = splits[1];
+        Integer tblID = tableNameToID.get(tblName);
+        StringBuilder sb = new StringBuilder("default@"+tblName + "@" + colName + "@" + data.get(tblName).get(colName)+"@");
+        for (int i = 2; i < splits.length; i++) {
+          sb.append(splits[i]+"@");
+        }
+        // Add tbl_id and empty bitvector
+        return sb.append(tblID).append("@").toString();
+        });
+
+      Files.write(tmpFileLoc1, (Iterable<String>)replaced::iterator);
+      replaced.close();
+      reader.close();
+
+      BufferedReader reader2 = new BufferedReader(new InputStreamReader(
+          new BZip2CompressorInputStream(Files.newInputStream(tabParamsCsv, StandardOpenOption.READ))));
+      final Map<String, String> colStats = new ConcurrentHashMap<>();
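+      // TABLE_PARAMS rows are re-keyed by table id; a COLUMN_STATS_ACCURATE entry per table is appended afterwards.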
+      Stream<String> replacedStream = reader2.lines().parallel().map(str-> {
+        String[] splits = str.split("_@");
+        String tblName = splits[0];
+        Integer tblId = tableNameToID.get(tblName);
+        Map<String, String> cols = data.get(tblName);
+        StringBuilder sb = new StringBuilder();
+        sb.append("{\"COLUMN_STATS\":{");
+        for (String colName : cols.keySet()) {
+          sb.append("\""+colName+"\":\"true\",");
+        }
+        sb.append("},\"BASIC_STATS\":\"true\"}");
+        colStats.put(tblId.toString(), sb.toString());
+
+        return  tblId.toString() + "@" + splits[1];
+      });
+
+      Files.write(tmpFileLoc2, (Iterable<String>)replacedStream::iterator);
+      Files.write(tmpFileLoc2, (Iterable<String>)colStats.entrySet().stream()
+        .map(map->map.getKey()+"@COLUMN_STATS_ACCURATE@"+map.getValue())::iterator, StandardOpenOption.APPEND);
+
+      replacedStream.close();
+      reader2.close();
+      // Load the column stats and table params with 30 TB scale
+      String importStatement1 =  "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE(null, '" + "TAB_COL_STATS" +
+        "', '" + tmpFileLoc1.toAbsolutePath().toString() +
+        "', '@', null, 'UTF-8', 1)";
+      String importStatement2 =  "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE(null, '" + "TABLE_PARAMS" +
+        "', '" + tmpFileLoc2.toAbsolutePath().toString() +
+        "', '@', null, 'UTF-8', 1)";
+      try {
+        PreparedStatement psImport1 = conn.prepareStatement(importStatement1);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Going to execute : " + importStatement1);
+        }
+        statements.add(psImport1);
+        psImport1.execute();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("successfully completed " + importStatement1);
+        }
+        PreparedStatement psImport2 = conn.prepareStatement(importStatement2);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Going to execute : " + importStatement2);
+        }
+        statements.add(psImport2);
+        psImport2.execute();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("successfully completed " + importStatement2);
+        }
+      } catch (SQLException e) {
+        LOG.info("Got SQL Exception  " +  e.getMessage());
+      }
+    } catch (FileNotFoundException e1) {
+      LOG.info("Got File not found Exception " + e1.getMessage());
+    } catch (IOException e1) {
+      LOG.info("Got IOException " + e1.getMessage());
+    } catch (SQLException e1) {
+      LOG.info("Got SQLException " + e1.getMessage());
+    } finally {
+      // Statements and PreparedStatements
+      int i = 0;
+      while (!statements.isEmpty()) {
+        // PreparedStatement extend Statement
+        Statement st = statements.remove(i);
+        try {
+          if (st != null) {
+            st.close();
+            st = null;
+          }
+        } catch (SQLException sqle) {
+        }
+      }
+
+      //Connection
+      try {
+        if (conn != null) {
+          conn.close();
+          conn = null;
+        }
+      } catch (SQLException sqle) {
+      }
+    }
+  }
+
+  private static String getHiveRoot() {
+    String path;
+    if (System.getProperty("hive.root") != null) {
+      try {
+        path = new File(System.getProperty("hive.root")).getCanonicalPath();
+      } catch (IOException e) {
+        throw new RuntimeException("error getting hive.root", e);
+      }
+    } else {
+      path = new File("target").getAbsolutePath();
+    }
+    return ensurePathEndsInSlash(new File(path).getAbsolutePath());
+  }
+
+  public static String ensurePathEndsInSlash(String path) {
+    if (path == null) {
+      throw new NullPointerException("Path cannot be null");
+    }
+    if (path.endsWith(File.separator)) {
+      return path;
+    } else {
+      return path + File.separator;
+    }
+  }
+
+}
diff --git a/phoenix-hive3/src/it/java/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java b/phoenix-hive3/src/it/java/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java
new file mode 100644
index 0000000..45fabf5
--- /dev/null
+++ b/phoenix-hive3/src/it/java/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.security;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+public class DummyAuthenticator implements HiveAuthenticationProvider {
+
+  private final List<String> groupNames;
+  private final String userName;
+  private Configuration conf;
+
+  public DummyAuthenticator() {
+    this.groupNames = new ArrayList<String>();
+    groupNames.add("hive_test_group1");
+    groupNames.add("hive_test_group2");
+    userName = "hive_test_user";
+  }
+
+  @Override
+  public void destroy() throws HiveException{
+    return;
+  }
+
+  @Override
+  public List<String> getGroupNames() {
+    return groupNames;
+  }
+
+  @Override
+  public String getUserName() {
+    return userName;
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return this.conf;
+  }
+
+  @Override
+  public void setSessionState(SessionState ss) {
+    //no op
+  }
+
+}
diff --git a/phoenix-hive3/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java b/phoenix-hive3/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java
new file mode 100644
index 0000000..15d8bb7
--- /dev/null
+++ b/phoenix-hive3/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.QTestProcessExecResult;
+import org.apache.phoenix.jdbc.PhoenixDriver;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.AfterClass;
+
+import com.google.common.base.Throwables;
+
+import java.io.File;
+import java.io.IOException;
+import java.sql.*;
+import java.util.Properties;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Base class for all Hive Phoenix integration tests that may be run with Tez or MR mini cluster
+ */
+public class BaseHivePhoenixStoreIT extends BaseHBaseManagedTimeIT {
+
+    private static final Log LOG = LogFactory.getLog(BaseHivePhoenixStoreIT.class);
+    protected static HBaseTestingUtility hbaseTestUtil;
+    protected static MiniHBaseCluster hbaseCluster;
+    private static String zkQuorum;
+    protected static Connection conn;
+    private static Configuration conf;
+    protected static HiveTestUtil qt;
+    protected static String hiveOutputDir;
+    protected static String hiveLogDir;
+
+    public static void setup(HiveTestUtil.MiniClusterType clusterType)throws Exception {
+        String hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
+        if (null != hadoopConfDir && !hadoopConfDir.isEmpty()) {
+          LOG.warn("WARNING: HADOOP_CONF_DIR is set in the environment which may cause "
+              + "issues with test execution via MiniDFSCluster");
+        }
+        hbaseTestUtil = new HBaseTestingUtility();
+        conf = hbaseTestUtil.getConfiguration();
+        setUpConfigForMiniCluster(conf);
+        conf.set(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
+        conf.set("hive.metastore.schema.verification","false");
+        hiveOutputDir = new Path(hbaseTestUtil.getDataTestDir(), "hive_output").toString();
+        File outputDir = new File(hiveOutputDir);
+        outputDir.mkdirs();
+        hiveLogDir = new Path(hbaseTestUtil.getDataTestDir(), "hive_log").toString();
+        File logDir = new File(hiveLogDir);
+        logDir.mkdirs();
+        // Setup Hive mini Server
+        Path testRoot = hbaseTestUtil.getDataTestDir();
+        System.setProperty("test.tmp.dir", testRoot.toString());
+        System.setProperty("test.warehouse.dir", (new Path(testRoot, "warehouse")).toString());
+        System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString(), "false");
+        //System.setProperty(HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL.toString(),"true");
+        try {
+            qt = new HiveTestUtil(hiveOutputDir, hiveLogDir, clusterType, "", "0.20", null, null, false);
+            // do a one time initialization
+            qt.createSources();
+        } catch (Exception e) {
+            LOG.error("Unexpected exception in setup: " + e.getMessage(), e);
+            fail("Unexpected exception in setup"+Throwables.getStackTraceAsString(e));
+        }
+
+        //Start HBase cluster
+        hbaseCluster = hbaseTestUtil.startMiniCluster(1);
+        MiniDFSCluster x = hbaseTestUtil.getDFSCluster();
+        Class.forName(PhoenixDriver.class.getName());
+        zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
+        Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+        props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
+        conn = DriverManager.getConnection(PhoenixRuntime.JDBC_PROTOCOL +
+                PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum, props);
+        // Create an initial Phoenix table for the tests
+        Statement stmt = conn.createStatement();
+        stmt.execute("create table t(a integer primary key,b varchar)");
+    }
+
+    protected void runTest(String fname, String fpath) throws Exception {
+        long startTime = System.currentTimeMillis();
+        try {
+            LOG.info("Begin query: " + fname);
+            qt.addFile(fpath);
+
+            if (qt.shouldBeSkipped(fname)) {
+                LOG.info("Test " + fname + " skipped");
+                return;
+            }
+
+            qt.cliInit(fname);
+            qt.clearTestSideEffects();
+            int ecode = qt.executeClient(fname);
+            if (ecode != 0) {
+                qt.failed(ecode, fname, null);
+                return;
+            }
+
+            QTestProcessExecResult result = qt.checkCliDriverResults(fname);
+            if (result.getReturnCode() != 0) {
+              qt.failedDiff(result.getReturnCode(), fname, result.getCapturedOutput());
+            }
+            qt.clearPostTestEffects();
+
+        } catch (Throwable e) {
+            qt.failed(new Exception(e), fname, null);
+        }
+
+        long elapsedTime = System.currentTimeMillis() - startTime;
+        LOG.info("Done query: " + fname + " elapsedTime=" + elapsedTime / 1000 + "s");
+        assertTrue("Test passed", true);
+    }
+
+    protected void createFile(String content, String fullName) throws IOException {
+        FileUtils.write(new File(fullName), content);
+    }
+
+    @AfterClass
+    public static synchronized void tearDownAfterClass() throws Exception {
+        try {
+            conn.close();
+        } finally {
+            try {
+                PhoenixDriver.INSTANCE.close();
+            } finally {
+                try {
+                    DriverManager.deregisterDriver(PhoenixDriver.INSTANCE);
+                } finally {
+                    hbaseTestUtil.shutdownMiniCluster();
+                }
+            }
+        }
+        // Shuts down the filesystem -- do this after stopping HBase.
+        if (qt != null) {
+            try {
+                qt.shutdown();
+            } catch (Exception e) {
+                LOG.error("Unexpected exception in tearDown", e);
+                //fail("Unexpected exception in tearDown");
+            }
+        }
+    }
+}
diff --git a/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java b/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
new file mode 100644
index 0000000..7b6fbbb
--- /dev/null
+++ b/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.hive;
+
+import static org.junit.Assert.fail;
+
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+public class HiveMapReduceIT extends HivePhoenixStoreIT {
+
+    @BeforeClass
+    public static void setUpBeforeClass() throws Exception {
+        final String hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
+        if (hadoopConfDir != null && hadoopConfDir.length() != 0) {
+            fail("HADOOP_CONF_DIR is non-empty in the current shell environment which will very likely cause this test to fail.");
+        }
+        setup(HiveTestUtil.MiniClusterType.mr);
+    }
+
+    @Override
+    @Test
+    @Ignore
+    /**
+     * Ignored because predicate pushdown is skipped for MR (ref: HIVE-18873) when there are multiple aliases
+     */
+    public void testJoinNoColumnMaps() throws Exception {
+
+    }
+
+    @Override
+    @Test
+    @Ignore
+    /**
+     * Ignored because projection pushdown is incorrect for MR when there are multiple aliases (ref: HIVE-18872)
+     */
+    public void testJoinColumnMaps() throws Exception {
+
+    }
+}
diff --git a/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java b/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java
new file mode 100644
index 0000000..550b2b5
--- /dev/null
+++ b/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java
@@ -0,0 +1,340 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.phoenix.util.StringUtil;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test methods only. All supporting methods should be placed in BaseHivePhoenixStoreIT.
+ */
+@Ignore("This class contains only test methods and should not be executed directly")
+public class HivePhoenixStoreIT extends BaseHivePhoenixStoreIT {
+
+    /**
+     * Create a table with two columns, insert one row, and check that the Phoenix table is
+     * created and the row is there.
+     *
+     * @throws Exception
+     */
+    @Test
+    public void simpleTest() throws Exception {
+        String testName = "simpleTest";
+        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
+        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
+        createFile(StringUtil.EMPTY_STRING, new Path(hiveOutputDir, testName + ".out").toString());
+        StringBuilder sb = new StringBuilder();
+        sb.append("CREATE EXTERNAL TABLE phoenix_table(ID STRING, SALARY STRING)" + HiveTestUtil.CRLF +
+                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
+                .CRLF + " TBLPROPERTIES(" + HiveTestUtil.CRLF +
+                "   'phoenix.table.name'='phoenix_table'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.client.port'='" +
+                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
+                "   'phoenix.rowkeys'='id');");
+        sb.append("INSERT INTO TABLE phoenix_table" + HiveTestUtil.CRLF +
+                "VALUES ('10', '1000');" + HiveTestUtil.CRLF);
+        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
+        createFile(sb.toString(), fullPath);
+        runTest(testName, fullPath);
+
+        String phoenixQuery = "SELECT * FROM phoenix_table";
+        PreparedStatement statement = conn.prepareStatement(phoenixQuery);
+        ResultSet rs = statement.executeQuery();
+        assert (rs.getMetaData().getColumnCount() == 2);
+        assertTrue(rs.next());
+        assert (rs.getString(1).equals("10"));
+        assert (rs.getString(2).equals("1000"));
+    }
+
+    /**
+     * Create a Hive table with a custom column mapping.
+     * @throws Exception
+     */
+
+    @Test
+    public void simpleColumnMapTest() throws Exception {
+        String testName = "cmTest";
+        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
+        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
+        createFile(StringUtil.EMPTY_STRING, new Path(hiveOutputDir, testName + ".out").toString());
+        StringBuilder sb = new StringBuilder();
+        sb.append("CREATE EXTERNAL TABLE column_table(ID STRING, P1 STRING, p2 STRING)" + HiveTestUtil.CRLF +
+                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
+                .CRLF + " TBLPROPERTIES(" + HiveTestUtil.CRLF +
+                "   'phoenix.table.name'='column_table'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
+                "   'phoenix.column.mapping' = 'id:C1, p1:c2, p2:C3'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.client.port'='" +
+                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
+                "   'phoenix.rowkeys'='id');");
+        sb.append("INSERT INTO TABLE column_table" + HiveTestUtil.CRLF +
+                "VALUES ('1', '2', '3');" + HiveTestUtil.CRLF);
+        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
+        createFile(sb.toString(), fullPath);
+        runTest(testName, fullPath);
+
+        String phoenixQuery = "SELECT C1, \"c2\", C3 FROM column_table";
+        PreparedStatement statement = conn.prepareStatement(phoenixQuery);
+        ResultSet rs = statement.executeQuery();
+        assert (rs.getMetaData().getColumnCount() == 3);
+        assertTrue(rs.next());
+        assert (rs.getString(1).equals("1"));
+        assert (rs.getString(2).equals("2"));
+        assert (rs.getString(3).equals("3"));
+
+    }
+
+
+    /**
+     * Datatype Test
+     *
+     * @throws Exception
+     */
+    @Test
+    public void dataTypeTest() throws Exception {
+        String testName = "dataTypeTest";
+        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
+        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
+        createFile(StringUtil.EMPTY_STRING, new Path(hiveOutputDir, testName + ".out").toString());
+        StringBuilder sb = new StringBuilder();
+        sb.append("CREATE EXTERNAL TABLE phoenix_datatype(ID int, description STRING, ts TIMESTAMP,  db " +
+                "DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
+                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
+                .CRLF + " TBLPROPERTIES(" + HiveTestUtil.CRLF +
+                "   'phoenix.hbase.table.name'='phoenix_datatype'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.client.port'='" +
+                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
+                "   'phoenix.rowkeys'='id');");
+        sb.append("INSERT INTO TABLE phoenix_datatype" + HiveTestUtil.CRLF +
+                "VALUES (10, \"foodesc\", \"2013-01-05 01:01:01\", 200,2.0,-1);" + HiveTestUtil.CRLF);
+        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
+        createFile(sb.toString(), fullPath);
+        runTest(testName, fullPath);
+
+        String phoenixQuery = "SELECT * FROM phoenix_datatype";
+        PreparedStatement statement = conn.prepareStatement(phoenixQuery);
+        ResultSet rs = statement.executeQuery();
+        assert (rs.getMetaData().getColumnCount() == 6);
+        while (rs.next()) {
+            assert (rs.getInt(1) == 10);
+            assert (rs.getString(2).equalsIgnoreCase("foodesc"));
+            assert (rs.getDouble(4) == 200);
+            assert (rs.getFloat(5) == 2.0);
+            assert (rs.getInt(6) == -1);
+        }
+    }
+
+    /**
+     * Composite (multi-column) row key test.
+     *
+     * @throws Exception
+     */
+    @Test
+    public void MultiKey() throws Exception {
+        String testName = "MultiKey";
+        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
+        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
+        createFile(StringUtil.EMPTY_STRING, new Path(hiveOutputDir, testName + ".out").toString());
+        StringBuilder sb = new StringBuilder();
+        sb.append("CREATE EXTERNAL TABLE phoenix_MultiKey(ID int, ID2 String,description STRING," +
+                "db DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
+                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
+                .CRLF +
+                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
+                "   'phoenix.hbase.table.name'='phoenix_MultiKey'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.client.port'='" +
+                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
+                "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
+        sb.append("INSERT INTO TABLE phoenix_MultiKey" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" +
+                HiveTestUtil.CRLF);
+        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
+        createFile(sb.toString(), fullPath);
+        runTest(testName, fullPath);
+
+        String phoenixQuery = "SELECT * FROM phoenix_MultiKey";
+        PreparedStatement statement = conn.prepareStatement(phoenixQuery);
+        ResultSet rs = statement.executeQuery();
+        assert (rs.getMetaData().getColumnCount() == 6);
+        while (rs.next()) {
+            assert (rs.getInt(1) == 10);
+            assert (rs.getString(2).equalsIgnoreCase("part2"));
+            assert (rs.getString(3).equalsIgnoreCase("foodesc"));
+            assert (rs.getDouble(4) == 200);
+            assert (rs.getFloat(5) == 2.0);
+            assert (rs.getInt(6) == -1);
+        }
+    }
+
+    /**
+     * Test that Hive can access Phoenix data during an MR job (create two tables and perform a join on them).
+     *
+     * @throws Exception
+     */
+    @Test
+    public void testJoinNoColumnMaps() throws Exception {
+        String testName = "testJoin";
+        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
+        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
+        createFile("#### A masked pattern was here ####\n10\tpart2\tfoodesc\t200.0\t2.0\t-1\t10\tpart2\tfoodesc\t200.0\t2.0\t-1\n",
+                new Path(hiveOutputDir, testName + ".out").toString());
+        StringBuilder sb = new StringBuilder();
+        sb.append("CREATE EXTERNAL TABLE joinTable1(ID int, ID2 String,description STRING," +
+                "db DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
+                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
+                .CRLF +
+                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
+                "   'phoenix.hbase.table.name'='joinTable1'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.client.port'='" +
+                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
+                "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
+        sb.append("CREATE EXTERNAL TABLE joinTable2(ID int, ID2 String,description STRING," +
+                "db DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
+                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
+                .CRLF +
+                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
+                "   'phoenix.hbase.table.name'='joinTable2'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.client.port'='" +
+                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
+                "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
+
+        sb.append("INSERT INTO TABLE joinTable1" + HiveTestUtil.CRLF +"VALUES (5, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+        sb.append("INSERT INTO TABLE joinTable1" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+
+        sb.append("INSERT INTO TABLE joinTable2" + HiveTestUtil.CRLF +"VALUES (5, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+        sb.append("INSERT INTO TABLE joinTable2" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+
+        sb.append("SELECT  * from joinTable1 A join joinTable2 B on A.id = B.id WHERE A.ID=10;" +
+                HiveTestUtil.CRLF);
+
+        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
+        createFile(sb.toString(), fullPath);
+        runTest(testName, fullPath);
+    }
+
+    /**
+     * Test that Hive can access Phoenix data during an MR job (create two tables and perform a join on them).
+     *
+     * @throws Exception
+     */
+    @Test
+    public void testJoinColumnMaps() throws Exception {
+        String testName = "testJoin";
+        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
+        createFile("#### A masked pattern was here ####\n10\t200.0\tpart2\n", new Path(hiveOutputDir, testName + ".out").toString());
+        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
+
+        StringBuilder sb = new StringBuilder();
+        sb.append("CREATE EXTERNAL TABLE joinTable3(ID int, ID2 String,description STRING," +
+                "db DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
+                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
+                .CRLF +
+                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
+                "   'phoenix.hbase.table.name'='joinTable3'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.client.port'='" +
+                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
+                "   'phoenix.column.mapping' = 'id:i1, id2:I2, db:db'," + HiveTestUtil.CRLF +
+                "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
+        sb.append("CREATE EXTERNAL TABLE joinTable4(ID int, ID2 String,description STRING," +
+                "db DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
+                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
+                .CRLF +
+                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
+                "   'phoenix.hbase.table.name'='joinTable4'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.client.port'='" +
+                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
+                "   'phoenix.column.mapping' = 'id:i1, id2:I2, db:db'," + HiveTestUtil.CRLF +
+                "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
+
+        sb.append("INSERT INTO TABLE joinTable3" + HiveTestUtil.CRLF +"VALUES (5, \'part1\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+        sb.append("INSERT INTO TABLE joinTable3" + HiveTestUtil.CRLF +"VALUES (10, \'part1\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+
+        sb.append("INSERT INTO TABLE joinTable4" + HiveTestUtil.CRLF +"VALUES (5, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+        sb.append("INSERT INTO TABLE joinTable4" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+        
+        sb.append("SELECT A.ID, a.db, B.ID2 from joinTable3 A join joinTable4 B on A.ID = B.ID WHERE A.ID=10;" +
+                HiveTestUtil.CRLF);
+
+        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
+        createFile(sb.toString(), fullPath);
+        runTest(testName, fullPath);
+        //Test that Phoenix has correctly mapped the columns. We check both primary key and
+        // regular columns, mapped and unmapped.
+        String phoenixQuery = "SELECT \"i1\", \"I2\", \"db\" FROM joinTable3 where \"i1\" = 10 AND \"I2\" = 'part1' AND \"db\" = 200";
+        PreparedStatement statement = conn.prepareStatement(phoenixQuery);
+        ResultSet rs = statement.executeQuery();
+        assert (rs.getMetaData().getColumnCount() == 3);
+        while (rs.next()) {
+            assert (rs.getInt(1) == 10);
+            assert (rs.getString(2).equalsIgnoreCase("part1"));
+            assert (rs.getDouble(3) == 200);
+        }
+    }
+
+    @Test
+    public void testTimestampPredicate() throws Exception {
+        String testName = "testTimeStampPredicate";
+        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
+        createFile("10\t2013-01-02 01:01:01.123456\n", new Path(hiveOutputDir, testName + ".out").toString());
+        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
+
+        StringBuilder sb = new StringBuilder();
+        sb.append("CREATE EXTERNAL TABLE timeStampTable(ID int,ts TIMESTAMP)" + HiveTestUtil.CRLF +
+                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
+                .CRLF +
+                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
+                "   'phoenix.hbase.table.name'='TIMESTAMPTABLE'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
+                "   'phoenix.zookeeper.client.port'='" +
+                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
+                "   'phoenix.column.mapping' = 'id:ID, ts:TS'," + HiveTestUtil.CRLF +
+                "   'phoenix.rowkeys'='id');" + HiveTestUtil.CRLF);
+        /*
+        The following query only checks that fractional seconds with more than 3 digits (nanosecond precision) are parsed correctly.
+         */
+        sb.append("INSERT INTO TABLE timeStampTable VALUES (10, \"2013-01-02 01:01:01.123456\");" + HiveTestUtil.CRLF);
+        sb.append("SELECT * from timeStampTable WHERE ts between '2012-01-02 01:01:01.123455' and " +
+                " '2015-01-02 12:01:02.123457789' AND id = 10;" + HiveTestUtil.CRLF);
+
+        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
+        createFile(sb.toString(), fullPath);
+        runTest(testName, fullPath);
+    }
+}
diff --git a/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java b/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java
new file mode 100644
index 0000000..2144f08
--- /dev/null
+++ b/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+import org.apache.hadoop.hive.ql.QTestUtil;
+
+/**
+ * HiveTestUtil is cloned from Hive's QTestUtil. It can become outdated and may require an update
+ * once a problem is found.
+ */
+public class HiveTestUtil extends QTestUtil {
+    public static final String CRLF = System.getProperty("line.separator");
+
+    public HiveTestUtil(String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadoopVer,
+                        String initScript, String cleanupScript, boolean withLlapIo) throws Exception {
+        super(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript, withLlapIo);
+    }
+
+    @Override
+    public int executeClient(String tname) {
+        conf.set("mapreduce.job.name", "test");
+        return super.executeClient(tname);
+    }
+
+}
diff --git a/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HiveTezIT.java b/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HiveTezIT.java
new file mode 100644
index 0000000..2931f93
--- /dev/null
+++ b/phoenix-hive3/src/it/java/org/apache/phoenix/hive/HiveTezIT.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.hive;
+
+import org.junit.BeforeClass;
+
+public class HiveTezIT extends HivePhoenixStoreIT {
+
+    @BeforeClass
+    public static void setUpBeforeClass() throws Exception {
+        setup(HiveTestUtil.MiniClusterType.tez);
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixMetaHook.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixMetaHook.java
new file mode 100644
index 0000000..eb63fc7
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixMetaHook.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.metastore.HiveMetaHook;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.hive.util.PhoenixConnectionUtil;
+import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
+import org.apache.phoenix.hive.util.PhoenixUtil;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.*;
+
+import static org.apache.phoenix.hive.util.ColumnMappingUtils.getColumnMappingMap;
+
+/**
+ * Implementation for notification methods which are invoked as part of transactions against the
+ * Hive metastore, allowing Phoenix metadata to be kept in sync with Hive's metastore.
+ */
+public class PhoenixMetaHook implements HiveMetaHook {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixMetaHook.class);
+    private static final String EXTERNAL_TABLE_PURGE = "external.table.purge";
+
+    @Override
+    public void preCreateTable(Table table) throws MetaException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Precreate table : " + table.getTableName());
+        }
+
+        try (Connection conn = PhoenixConnectionUtil.getConnection(table)) {
+            String tableType = table.getTableType();
+            String tableName = PhoenixStorageHandlerUtil.getTargetTableName(table);
+
+            if (TableType.EXTERNAL_TABLE.name().equals(tableType)) {
+                // Check whether phoenix table exists.
+                if (!PhoenixUtil.existTable(conn, tableName)) {
+                    // For Hive 3.0.0 only external tables are supported. If the table doesn't
+                    // exist, a new table is created.
+                    PhoenixUtil.createTable(conn, createTableStatement(table));
+                    Map<String, String> tableParameterMap = table.getParameters();
+                    tableParameterMap.put(EXTERNAL_TABLE_PURGE, "TRUE");
+                    table.setParameters(tableParameterMap);
+                }
+            } else if (TableType.MANAGED_TABLE.name().equals(tableType)) {
+                throw new MetaException("Only external tables are supported for PhoenixStorageHandler");
+
+            } else {
+                throw new MetaException("Unsupported table type: " + table.getTableType());
+            }
+
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("Phoenix table " + tableName + " was created");
+            }
+        } catch (SQLException e) {
+            throw new MetaException(e.getMessage());
+        }
+    }
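+
+    // Illustrative sketch only: a Hive DDL of the shape used in the integration tests above ends
+    // up in preCreateTable(); when the Phoenix table does not exist yet, it is created from
+    // createTableStatement(). The property names mirror those tests and are not exhaustive, and
+    // the port value is hypothetical.
+    //
+    //   CREATE EXTERNAL TABLE phoenix_table (id STRING, salary STRING)
+    //   STORED BY 'org.apache.phoenix.hive.PhoenixStorageHandler'
+    //   TBLPROPERTIES (
+    //     'phoenix.table.name' = 'phoenix_table',
+    //     'phoenix.zookeeper.quorum' = 'localhost',
+    //     'phoenix.zookeeper.znode.parent' = '/hbase',
+    //     'phoenix.zookeeper.client.port' = '2181',
+    //     'phoenix.rowkeys' = 'id');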
+
+    private String createTableStatement(Table table) throws MetaException {
+        Map<String, String> tableParameterMap = table.getParameters();
+
+        String tableName = PhoenixStorageHandlerUtil.getTargetTableName(table);
+        StringBuilder ddl = new StringBuilder("create table ").append(tableName).append(" (\n");
+
+        String phoenixRowKeys = tableParameterMap.get(PhoenixStorageHandlerConstants
+                .PHOENIX_ROWKEYS);
+        StringBuilder realRowKeys = new StringBuilder();
+        List<String> phoenixRowKeyList = new ArrayList<>();
+        for (String key:phoenixRowKeys.split(PhoenixStorageHandlerConstants.COMMA)) {
+            phoenixRowKeyList.add(key.trim());
+        }
+        Map<String, String> columnMappingMap = getColumnMappingMap(tableParameterMap.get
+                (PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING));
+
+        List<FieldSchema> fieldSchemaList = table.getSd().getCols();
+        for (int i = 0, limit = fieldSchemaList.size(); i < limit; i++) {
+            FieldSchema fieldSchema = fieldSchemaList.get(i);
+            String fieldName = fieldSchema.getName();
+            String fieldType = fieldSchema.getType();
+            String columnType = PhoenixUtil.getPhoenixType(fieldType);
+
+            String rowKeyName = getRowKeyMapping(fieldName, phoenixRowKeyList);
+            if (rowKeyName != null) {
+                String columnName = columnMappingMap.get(fieldName);
+                if(columnName != null) {
+                    rowKeyName = columnName;
+                }
+                // In case of RowKey
+                if ("binary".equals(columnType)) {
+                    // Phoenix requires a maximum length for BINARY columns in the type definition.
+                    // The length is taken from the row key mapping, e.g. phoenix.rowkeys = "r1, r2(100), ..."
+                    List<String> tokenList =
+                       new ArrayList<>();
+                    for (String name: rowKeyName.split("\\(|\\)")) {
+                        tokenList.add(name.trim());
+                    }
+                    columnType = columnType + "(" + tokenList.get(1) + ")";
+                    rowKeyName = tokenList.get(0);
+                }
+
+                ddl.append("  ").append("\"").append(rowKeyName).append("\"").append(" ").append(columnType).append(" not " +
+                        "null,\n");
+                realRowKeys.append("\"").append(rowKeyName).append("\",");
+            } else {
+                // In case of Column
+                String columnName = columnMappingMap.get(fieldName);
+
+                if (columnName == null) {
+                    // Use field definition.
+                    columnName = fieldName;
+                }
+
+                if ("binary".equals(columnType)) {
+                    // Phoenix requires a maximum length for BINARY columns in the type definition.
+                    // The length is taken from the column mapping, e.g. phoenix.column.mapping=c1:c1(100)
+                    List<String> tokenList = new ArrayList<>();
+                    for(String name: columnName.split("\\(|\\)")){
+                        tokenList.add(name.trim());
+                    }
+                    columnType = columnType + "(" + tokenList.get(1) + ")";
+                    columnName = tokenList.get(0);
+                }
+
+                ddl.append("  ").append("\"").append(columnName).append("\"").append(" ").append(columnType).append(",\n");
+            }
+        }
+        ddl.append("  ").append("constraint pk_").append(PhoenixUtil.getTableSchema(tableName.toUpperCase())[1]).append(" primary key(")
+                .append(realRowKeys.deleteCharAt(realRowKeys.length() - 1)).append(")\n)\n");
+
+        String tableOptions = tableParameterMap.get(PhoenixStorageHandlerConstants
+                .PHOENIX_TABLE_OPTIONS);
+        if (tableOptions != null) {
+            ddl.append(tableOptions);
+        }
+
+        String statement = ddl.toString();
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("DDL : " + statement);
+        }
+
+        return statement;
+    }
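+
+    // Illustrative sketch of the DDL this method builds, assuming hypothetical Hive columns
+    // (id INT, id2 STRING, db DOUBLE), 'phoenix.rowkeys' = 'id,id2' and
+    // 'phoenix.column.mapping' = 'id:i1, id2:I2, db:db'. Column types are whatever
+    // PhoenixUtil.getPhoenixType returns for the Hive types:
+    //
+    //   create table joinTable3 (
+    //     "i1" <phoenix type for int> not null,
+    //     "I2" <phoenix type for string> not null,
+    //     "db" <phoenix type for double>,
+    //     constraint pk_JOINTABLE3 primary key("i1","I2")
+    //   )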
+
+    private String getRowKeyMapping(String rowKeyName, List<String> phoenixRowKeyList) {
+        String rowKeyMapping = null;
+
+        for (String phoenixRowKey : phoenixRowKeyList) {
+            if (phoenixRowKey.equals(rowKeyName)) {
+                rowKeyMapping = phoenixRowKey;
+                break;
+            } else if (phoenixRowKey.startsWith(rowKeyName + "(") && phoenixRowKey.endsWith(")")) {
+                rowKeyMapping = phoenixRowKey;
+                break;
+            }
+        }
+
+        return rowKeyMapping;
+    }
+
+    @Override
+    public void rollbackCreateTable(Table table) throws MetaException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Rollback for table : " + table.getTableName());
+        }
+
+        dropTableIfExist(table);
+    }
+
+    @Override
+    public void commitCreateTable(Table table) throws MetaException {
+
+    }
+
+    @Override
+    public void preDropTable(Table table) throws MetaException {
+    }
+
+    @Override
+    public void rollbackDropTable(Table table) throws MetaException {
+    }
+
+    @Override
+    public void commitDropTable(Table table, boolean deleteData) throws MetaException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Commit drop table : " + table.getTableName());
+        }
+
+        dropTableIfExist(table);
+    }
+
+    private void dropTableIfExist(Table table) throws MetaException {
+        try (Connection conn = PhoenixConnectionUtil.getConnection(table)) {
+            String tableName = PhoenixStorageHandlerUtil.getTargetTableName(table);
+
+            if (isExternalTablePurge(table)) {
+                // Drop the Phoenix table if it exists.
+                if (PhoenixUtil.existTable(conn, tableName)) {
+                    PhoenixUtil.dropTable(conn, tableName);
+                }
+            }
+        } catch (SQLException e) {
+            throw new MetaException(e.getMessage());
+        }
+    }
+
+    private boolean isExternalTablePurge(Table table) {
+        if (table == null) {
+            return false;
+        }
+        Map<String, String> params = table.getParameters();
+        if (params == null) {
+            return false;
+        }
+        return isPropertyTrue(params, EXTERNAL_TABLE_PURGE);
+    }
+
+    private boolean isPropertyTrue(Map<String, String> tableParams, String prop) {
+        return "TRUE".equalsIgnoreCase(tableParams.get(prop));
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixRecordUpdater.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixRecordUpdater.java
new file mode 100644
index 0000000..1f26df1
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixRecordUpdater.java
@@ -0,0 +1,341 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
+import org.apache.hadoop.hive.ql.io.RecordUpdater;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.SerDeStats;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.phoenix.hive.PhoenixSerializer.DmlType;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.hive.mapreduce.PhoenixResultWritable;
+import org.apache.phoenix.hive.util.PhoenixConnectionUtil;
+import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
+import org.apache.phoenix.hive.util.PhoenixUtil;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.schema.ConcurrentTableMutationException;
+import org.apache.phoenix.schema.MetaDataClient;
+import org.apache.phoenix.util.QueryUtil;
+
+public class PhoenixRecordUpdater implements RecordUpdater {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixRecordUpdater.class);
+
+    private final Connection conn;
+    private final PreparedStatement pstmt;
+    private final long batchSize;
+    private long numRecords = 0;
+
+    private Configuration config;
+    private String tableName;
+    private MetaDataClient metaDataClient;
+    private boolean restoreWalMode;
+
+    private long rowCountDelta = 0;
+
+    private PhoenixSerializer phoenixSerializer;
+    private ObjectInspector objInspector;
+    private PreparedStatement pstmtForDelete;
+
+    public PhoenixRecordUpdater(Path path, AcidOutputFormat.Options options) throws IOException {
+        this.config = options.getConfiguration();
+        tableName = config.get(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME);
+
+        Properties props = new Properties();
+
+        try {
+            // Disable WAL
+            String walConfigName = tableName.toLowerCase() + PhoenixStorageHandlerConstants
+                    .DISABLE_WAL;
+            boolean disableWal = config.getBoolean(walConfigName, false);
+            if (disableWal) {
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug(walConfigName + " is true. batch.mode will be set to true.");
+                }
+
+                props.setProperty(PhoenixStorageHandlerConstants.BATCH_MODE, "true");
+            }
+
+            this.conn = PhoenixConnectionUtil.getInputConnection(config, props);
+
+            if (disableWal) {
+                metaDataClient = new MetaDataClient((PhoenixConnection) conn);
+
+                if (!PhoenixUtil.isDisabledWal(metaDataClient, tableName)) {
+                    // execute an ALTER TABLE statement if disable_wal is not already true.
+                    try {
+                        PhoenixUtil.alterTableForWalDisable(conn, tableName, true);
+                    } catch (ConcurrentTableMutationException e) {
+                        if (LOG.isWarnEnabled()) {
+                            LOG.warn("Concurrent modification of disableWAL");
+                        }
+                    }
+
+                    if (LOG.isDebugEnabled()) {
+                        LOG.debug(tableName + "'s WAL has been disabled.");
+                    }
+
+                    // restore original value of disable_wal at the end.
+                    restoreWalMode = true;
+                }
+            }
+
+            this.batchSize = PhoenixConfigurationUtil.getBatchSize(config);
+
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("Batch-size : " + batchSize);
+            }
+
+            String upsertQuery = QueryUtil.constructUpsertStatement(tableName, PhoenixUtil
+                    .getColumnInfoList(conn, tableName));
+
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("Upsert-query : " + upsertQuery);
+            }
+            this.pstmt = this.conn.prepareStatement(upsertQuery);
+        } catch (SQLException e) {
+            throw new IOException(e);
+        }
+
+        this.objInspector = options.getInspector();
+        try {
+            phoenixSerializer = new PhoenixSerializer(config, options.getTableProperties());
+        } catch (SerDeException e) {
+            throw new IOException(e);
+        }
+    }
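+
+    // Illustrative sketch of the per-table knobs read above (the constant suffixes live in
+    // PhoenixStorageHandlerConstants, so the concrete property names are an assumption here):
+    //   <table name, lower-cased> + DISABLE_WAL  -> when true, batch mode is enabled and the
+    //                                               Phoenix table's WAL is temporarily disabled,
+    //                                               then restored in close() if restoreWalMode is set.
+    //   PhoenixConfigurationUtil.getBatchSize()  -> number of upserts between connection commits.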
+
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#insert(long, java.lang.Object)
+     */
+    @Override
+    public void insert(long currentTransaction, Object row) throws IOException {
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Insert - currentTransaction : " + currentTransaction + ", row : " +
+                    PhoenixStorageHandlerUtil.toString(row));
+        }
+
+        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
+                .serialize(row, objInspector, DmlType.INSERT);
+
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Data : " + pResultWritable.getValueList());
+        }
+
+        write(pResultWritable);
+
+        rowCountDelta++;
+    }
+
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#update(long, java.lang.Object)
+     */
+    @Override
+    public void update(long currentTransaction, Object row) throws IOException {
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Update - currentTransaction : " + currentTransaction + ", row : " +
+                    PhoenixStorageHandlerUtil.toString(row));
+        }
+
+        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
+                .serialize(row, objInspector, DmlType.UPDATE);
+
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Data : " + pResultWritable.getValueList());
+        }
+
+        write(pResultWritable);
+    }
+
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#delete(long, java.lang.Object)
+     */
+    @Override
+    public void delete(long currentTransaction, Object row) throws IOException {
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Delete - currentTransaction : " + currentTransaction + ", row : " +
+                    PhoenixStorageHandlerUtil.toString(row));
+        }
+
+        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
+                .serialize(row, objInspector, DmlType.DELETE);
+
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Data : " + pResultWritable.getValueList());
+        }
+
+        if (pstmtForDelete == null) {
+            try {
+                String deleteQuery = PhoenixUtil.constructDeleteStatement(conn, tableName);
+
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("Delete query : " + deleteQuery);
+                }
+
+                pstmtForDelete = conn.prepareStatement(deleteQuery);
+            } catch (SQLException e) {
+                throw new IOException(e);
+            }
+        }
+
+        delete(pResultWritable);
+
+        rowCountDelta--;
+    }
+
+    private void delete(PhoenixResultWritable pResultWritable) throws IOException {
+        try {
+            pResultWritable.delete(pstmtForDelete);
+            numRecords++;
+            pstmtForDelete.executeUpdate();
+
+            if (numRecords % batchSize == 0) {
+                LOG.debug("Commit called on a batch of size : " + batchSize);
+                conn.commit();
+            }
+        } catch (SQLException e) {
+            throw new IOException("Exception while deleting from table.", e);
+        }
+    }
+
+    private void write(PhoenixResultWritable pResultWritable) throws IOException {
+        try {
+            pResultWritable.write(pstmt);
+            numRecords++;
+            pstmt.executeUpdate();
+
+            if (numRecords % batchSize == 0) {
+                LOG.debug("Commit called on a batch of size : " + batchSize);
+                conn.commit();
+            }
+        } catch (SQLException e) {
+            throw new IOException("Exception while writing to table.", e);
+        }
+    }
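+
+    // Worked example (hypothetical numbers): with batchSize = 1000, write()/delete() trigger
+    // conn.commit() after records 1000, 2000, 3000, ...; any remainder is committed by flush()
+    // or close().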
+
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#flush()
+     */
+    @Override
+    public void flush() throws IOException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Flush called");
+        }
+
+        try {
+            conn.commit();
+
+            if (LOG.isInfoEnabled()) {
+                LOG.info("Written rows : " + numRecords);
+            }
+        } catch (SQLException e) {
+            LOG.error("SQLException while performing the commit for the task.");
+            throw new IOException(e);
+        }
+    }
+
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#close(boolean)
+     */
+    @Override
+    public void close(boolean abort) throws IOException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("abort : " + abort);
+        }
+
+        try {
+            conn.commit();
+
+            if (LOG.isInfoEnabled()) {
+                LOG.info("Written rows : " + numRecords);
+            }
+        } catch (SQLException e) {
+            LOG.error("SQLException while performing the commit for the task.");
+            throw new IOException(e);
+        } finally {
+            try {
+                if (restoreWalMode && PhoenixUtil.isDisabledWal(metaDataClient, tableName)) {
+                    try {
+                        PhoenixUtil.alterTableForWalDisable(conn, tableName, false);
+                    } catch (ConcurrentTableMutationException e) {
+                        if (LOG.isWarnEnabled()) {
+                            LOG.warn("Concurrent modification of disableWAL");
+                        }
+                    }
+
+                    if (LOG.isDebugEnabled()) {
+                        LOG.debug(tableName + "'s WAL has been re-enabled.");
+                    }
+                }
+
+                // flush when [table-name].auto.flush is true.
+                String autoFlushConfigName = tableName.toLowerCase() +
+                        PhoenixStorageHandlerConstants.AUTO_FLUSH;
+                boolean autoFlush = config.getBoolean(autoFlushConfigName, false);
+                if (autoFlush) {
+                    if (LOG.isDebugEnabled()) {
+                        LOG.debug("autoFlush is " + autoFlush);
+                    }
+
+                    PhoenixUtil.flush(conn, tableName);
+                }
+
+                PhoenixUtil.closeResource(pstmt);
+                PhoenixUtil.closeResource(pstmtForDelete);
+                PhoenixUtil.closeResource(conn);
+            } catch (SQLException ex) {
+                LOG.error("SQLException while closing the connection for the task.");
+                throw new IOException(ex);
+            }
+        }
+    }
+
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#getStats()
+     */
+    @Override
+    public SerDeStats getStats() {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("getStats called");
+        }
+
+        SerDeStats stats = new SerDeStats();
+        stats.setRowCount(rowCountDelta);
+        // Don't worry about setting the raw data size diff. There is no reasonable way to calculate
+        // that without finding the row we are updating or deleting, which would be a mess.
+        return stats;
+    }
+
+    @Override
+    public long getBufferedRowCount() {
+        return numRecords;
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixRow.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixRow.java
new file mode 100644
index 0000000..cae8f6c
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixRow.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+import org.apache.hadoop.hive.serde2.StructObject;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Implementation for Hive SerDe StructObject
+ */
+public class PhoenixRow implements StructObject {
+
+    private List<String> columnList;
+    private Map<String, Object> resultRowMap;
+
+    public PhoenixRow(List<String> columnList) {
+        this.columnList = columnList;
+    }
+
+    public PhoenixRow setResultRowMap(Map<String, Object> resultRowMap) {
+        this.resultRowMap = resultRowMap;
+        return this;
+    }
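+
+    // Illustrative usage sketch (hypothetical values; production code populates the map from a
+    // PhoenixResultWritable instead):
+    //
+    //   Map<String, Object> values = new HashMap<>();
+    //   values.put("ID", "10");
+    //   values.put("SALARY", "1000");
+    //   PhoenixRow row = new PhoenixRow(Arrays.asList("ID", "SALARY")).setResultRowMap(values);
+    //   row.getField(0);         // -> "10"
+    //   row.getFieldsAsList();   // -> the result map's values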
+
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hive.serde2.StructObject#getField(int)
+     */
+    @Override
+    public Object getField(int fieldID) {
+        return resultRowMap.get(columnList.get(fieldID));
+    }
+
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hive.serde2.StructObject#getFieldsAsList()
+     */
+    @Override
+    public List<Object> getFieldsAsList() {
+        return new ArrayList<>(resultRowMap.values());
+    }
+
+
+    @Override
+    public String toString() {
+        return resultRowMap.toString();
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixRowKey.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixRowKey.java
new file mode 100644
index 0000000..a963fba
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixRowKey.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+import org.apache.hadoop.hive.ql.io.RecordIdentifier;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Map;
+
+/**
+ * Hive's RecordIdentifier implementation.
+ */
+
+public class PhoenixRowKey extends RecordIdentifier {
+
+    private PrimaryKeyData rowKeyMap = PrimaryKeyData.EMPTY;
+
+    public PhoenixRowKey() {
+
+    }
+
+    public void setRowKeyMap(Map<String, Object> rowKeyMap) {
+        this.rowKeyMap = new PrimaryKeyData(rowKeyMap);
+    }
+
+    @Override
+    public void write(DataOutput dataOutput) throws IOException {
+        super.write(dataOutput);
+
+        rowKeyMap.serialize((OutputStream) dataOutput);
+    }
+
+    @Override
+    public void readFields(DataInput dataInput) throws IOException {
+        super.readFields(dataInput);
+
+        try {
+            rowKeyMap = PrimaryKeyData.deserialize((InputStream) dataInput);
+        } catch (ClassNotFoundException e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixSerDe.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixSerDe.java
new file mode 100644
index 0000000..9b5083d
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixSerDe.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.AbstractSerDe;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.SerDeStats;
+import org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.ObjectInspectorOptions;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.Writable;
+import org.apache.phoenix.hive.PhoenixSerializer.DmlType;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.hive.mapreduce.PhoenixResultWritable;
+import org.apache.phoenix.hive.objectinspector.PhoenixObjectInspectorFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * SerDe implementation for the Phoenix Hive storage handler.
+ */
+public class PhoenixSerDe extends AbstractSerDe {
+
+    public static final Log LOG = LogFactory.getLog(PhoenixSerDe.class);
+
+    private PhoenixSerializer serializer;
+    private ObjectInspector objectInspector;
+
+    private LazySerDeParameters serdeParams;
+    private PhoenixRow row;
+
+    private Properties tableProperties;
+
+    /**
+     * Default constructor; only logs its creation.
+     *
+     * @throws SerDeException declared for API compatibility, never thrown here
+     */
+    public PhoenixSerDe() throws SerDeException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("PhoenixSerDe created");
+        }
+    }
+
+    @Override
+    public void initialize(Configuration conf, Properties tbl) throws SerDeException {
+        tableProperties = tbl;
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("SerDe initialize : " + tbl.getProperty("name"));
+        }
+
+        serdeParams = new LazySerDeParameters(conf, tbl, getClass().getName());
+        objectInspector = createLazyPhoenixInspector(conf, tbl);
+
+        String inOutWork = tbl.getProperty(PhoenixStorageHandlerConstants.IN_OUT_WORK);
+        if (inOutWork == null) {
+            return;
+        }
+
+        serializer = new PhoenixSerializer(conf, tbl);
+        row = new PhoenixRow(serdeParams.getColumnNames());
+    }
+
+    @Override
+    public Object deserialize(Writable result) throws SerDeException {
+        if (!(result instanceof PhoenixResultWritable)) {
+            throw new SerDeException(result.getClass().getName() + ": expects " +
+                    "PhoenixResultWritable!");
+        }
+
+        return row.setResultRowMap(((PhoenixResultWritable) result).getResultMap());
+    }
+
+    @Override
+    public Class<? extends Writable> getSerializedClass() {
+        return PhoenixResultWritable.class;
+    }
+
+    @Override
+    public Writable serialize(Object obj, ObjectInspector objInspector) throws SerDeException {
+        try {
+            return serializer.serialize(obj, objInspector, DmlType.NONE);
+        } catch (Exception e) {
+            throw new SerDeException(e);
+        }
+    }
+
+    @Override
+    public SerDeStats getSerDeStats() {
+        // no support for statistics
+        return null;
+    }
+
+    public Properties getTableProperties() {
+        return tableProperties;
+    }
+
+    public LazySerDeParameters getSerdeParams() {
+        return serdeParams;
+    }
+
+    @Override
+    public ObjectInspector getObjectInspector() throws SerDeException {
+        return objectInspector;
+    }
+
+    private ObjectInspector createLazyPhoenixInspector(Configuration conf, Properties tbl) throws
+            SerDeException {
+        List<String> columnNameList = Arrays.asList(tbl.getProperty(serdeConstants.LIST_COLUMNS)
+                .split(PhoenixStorageHandlerConstants.COMMA));
+        List<TypeInfo> columnTypeList = TypeInfoUtils.getTypeInfosFromTypeString(tbl.getProperty
+                (serdeConstants.LIST_COLUMN_TYPES));
+
+        List<ObjectInspector> columnObjectInspectors = new ArrayList<>(columnTypeList.size());
+
+        for (TypeInfo typeInfo : columnTypeList) {
+            columnObjectInspectors.add(PhoenixObjectInspectorFactory.createObjectInspector
+                    (typeInfo, serdeParams));
+        }
+
+        return LazyObjectInspectorFactory.getLazySimpleStructObjectInspector(columnNameList,
+                columnObjectInspectors, null, serdeParams.getSeparators()[0], serdeParams,
+                ObjectInspectorOptions.JAVA);
+    }
+}
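
An illustrative initialization sketch (not from the patch): because in.out.work is left unset here, initialize() returns before constructing a PhoenixSerializer, so no Phoenix/HBase connection is needed to obtain the lazy object inspector. The column names and types are hypothetical.

    import java.util.Properties;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.serde.serdeConstants;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.phoenix.hive.PhoenixSerDe;

    public class PhoenixSerDeSketch {
        public static void main(String[] args) throws Exception {
            Properties tbl = new Properties();
            tbl.setProperty(serdeConstants.LIST_COLUMNS, "id,name");         // hypothetical columns
            tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, "int:string");

            PhoenixSerDe serde = new PhoenixSerDe();
            serde.initialize(new Configuration(), tbl);

            // A lazy struct inspector over (id int, name string)
            ObjectInspector oi = serde.getObjectInspector();
            System.out.println(oi.getTypeName());
        }
    }
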
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixSerializer.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixSerializer.java
new file mode 100644
index 0000000..9f3d35c
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixSerializer.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.type.Date;
+import org.apache.hadoop.hive.common.type.HiveChar;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.common.type.Timestamp;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.hive.mapreduce.PhoenixResultWritable;
+import org.apache.phoenix.hive.util.PhoenixConnectionUtil;
+import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
+import org.apache.phoenix.hive.util.PhoenixUtil;
+import org.apache.phoenix.util.ColumnInfo;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Serializer used in PhoenixSerDe and PhoenixRecordUpdater to produce Writable.
+ */
+public class PhoenixSerializer {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixSerializer.class);
+
+    public static enum DmlType {
+        NONE,
+        SELECT,
+        INSERT,
+        UPDATE,
+        DELETE
+    }
+
+    private int columnCount = 0;
+    private PhoenixResultWritable pResultWritable;
+
+    public PhoenixSerializer(Configuration config, Properties tbl) throws SerDeException {
+        String mapping = tbl.getProperty(PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING,
+          null);
+        if(mapping != null) {
+            config.set(PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING, mapping);
+        }
+
+        // Populate the table properties into config, because these values are used in
+        // the initialization of PhoenixResultWritable if the table is transactional
+        String tableName = tbl.getProperty(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME);
+        config.set(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME, tableName);
+
+        config.set(PhoenixStorageHandlerConstants.PHOENIX_ROWKEYS,
+            tbl.getProperty(PhoenixStorageHandlerConstants.PHOENIX_ROWKEYS));
+
+        String quorum = config.get(PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM);
+        if (quorum == null) {
+            config.set(PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM,
+              tbl.getProperty(PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM,
+                PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_QUORUM));
+        }
+
+        int zooKeeperClientPort =
+          config.getInt(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT, 0);
+        if (zooKeeperClientPort == 0) {
+            config.setInt(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT,
+              Integer.parseInt(tbl.getProperty(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT,
+                String.valueOf(PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_PORT))));
+        }
+
+        String zNodeParent = config.get(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT);
+        if (zNodeParent == null) {
+            config.set(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT,
+              tbl.getProperty(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT,
+                PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_PARENT));
+        }
+
+        try (Connection conn = PhoenixConnectionUtil.getInputConnection(config, tbl)) {
+            List<ColumnInfo> columnMetadata = PhoenixUtil.getColumnInfoList(conn, tableName);
+
+            columnCount = columnMetadata.size();
+
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("Column-meta : " + columnMetadata);
+            }
+
+            pResultWritable = new PhoenixResultWritable(config, columnMetadata);
+        } catch (SQLException | IOException e) {
+            throw new SerDeException(e);
+        }
+    }
+
+    public Writable serialize(Object values, ObjectInspector objInspector, DmlType dmlType) {
+        pResultWritable.clear();
+
+        final StructObjectInspector structInspector = (StructObjectInspector) objInspector;
+        final List<? extends StructField> fieldList = structInspector.getAllStructFieldRefs();
+
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("FieldList : " + fieldList + " values(" + values.getClass() + ") : " +
+                    values);
+        }
+
+        int fieldCount = columnCount;
+        if (dmlType == DmlType.UPDATE || dmlType == DmlType.DELETE) {
+            fieldCount++;
+        }
+
+        for (int i = 0; i < fieldCount; i++) {
+            if (fieldList.size() <= i) {
+                break;
+            }
+
+            StructField structField = fieldList.get(i);
+
+            if (LOG.isTraceEnabled()) {
+                LOG.trace("structField[" + i + "] : " + structField);
+            }
+
+            if (structField != null) {
+                Object fieldValue = structInspector.getStructFieldData(values, structField);
+                ObjectInspector fieldOI = structField.getFieldObjectInspector();
+
+                String fieldName = structField.getFieldName();
+
+                if (LOG.isTraceEnabled()) {
+                    LOG.trace("Field " + fieldName + "[" + i + "] : " + fieldValue + ", " +
+                            fieldOI);
+                }
+
+                Object value = null;
+                switch (fieldOI.getCategory()) {
+                    case PRIMITIVE:
+                        value = ((PrimitiveObjectInspector) fieldOI).getPrimitiveJavaObject
+                                (fieldValue);
+
+                        if (LOG.isTraceEnabled()) {
+                            LOG.trace("Field " + fieldName + "[" + i + "] : " + value + "(" + value
+                                    .getClass() + ")");
+                        }
+
+                        if (value instanceof HiveDecimal) {
+                            value = ((HiveDecimal) value).bigDecimalValue();
+                        } else if (value instanceof HiveChar) {
+                            value = ((HiveChar) value).getValue().trim();
+                        } else if (value instanceof Date) {
+                            value = java.sql.Date.valueOf(value.toString());
+                        } else if (value instanceof Timestamp) {
+                            value = java.sql.Timestamp.valueOf(value.toString());
+                        }
+
+                        pResultWritable.add(value);
+                        break;
+                    case LIST:
+                        // Arrays are not yet supported in INSERT statements
+                        break;
+                    case STRUCT:
+                        if (dmlType == DmlType.DELETE) {
+                            // For UPDATE/DELETE the first value is a
+                            // struct<transactionid:bigint,bucketid:int,rowid:bigint,primaryKey:binary>
+                            List<Object> fieldValueList = ((StandardStructObjectInspector)
+                                    fieldOI).getStructFieldsDataAsList(fieldValue);
+
+                            // convert to map from binary of primary key.
+                            @SuppressWarnings("unchecked")
+                            Map<String, Object> primaryKeyMap = (Map<String, Object>)
+                                    PhoenixStorageHandlerUtil.toMap(((BytesWritable)
+                                            fieldValueList.get(3)).getBytes());
+                            for (Object pkValue : primaryKeyMap.values()) {
+                                pResultWritable.add(pkValue);
+                            }
+                        }
+
+                        break;
+                    default:
+                        throw new RuntimeException("Unsupported Phoenix column type: " + fieldOI
+                                .getCategory());
+                }
+            }
+        }
+
+        return pResultWritable;
+    }
+}
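
The PRIMITIVE branch above converts a handful of Hive wrapper types into the java.sql/java.math values that the Phoenix JDBC driver expects. A standalone sketch of those conversions with illustrative values (not from the patch):

    import org.apache.hadoop.hive.common.type.Date;
    import org.apache.hadoop.hive.common.type.HiveChar;
    import org.apache.hadoop.hive.common.type.HiveDecimal;
    import org.apache.hadoop.hive.common.type.Timestamp;

    public class HiveToPhoenixValueSketch {
        public static void main(String[] args) {
            // DECIMAL: HiveDecimal becomes java.math.BigDecimal
            java.math.BigDecimal decimal = HiveDecimal.create("123.45").bigDecimalValue();

            // CHAR: trailing padding is trimmed before the upsert
            String chars = new HiveChar("abc", 10).getValue().trim();

            // DATE and TIMESTAMP: Hive 3 ships its own date/time types, which are converted
            // through their string form into the java.sql equivalents
            java.sql.Date date = java.sql.Date.valueOf(Date.valueOf("2020-01-01").toString());
            java.sql.Timestamp ts = java.sql.Timestamp.valueOf(
                    Timestamp.valueOf("2020-01-01 10:00:00.123").toString());

            System.out.println(decimal + " / " + chars + " / " + date + " / " + ts);
        }
    }
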
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java
new file mode 100644
index 0000000..0f8ee93
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java
@@ -0,0 +1,276 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
+import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaHook;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.ql.exec.TableScanOperator;
+import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler;
+import org.apache.hadoop.hive.ql.metadata.InputEstimator;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.serde2.AbstractSerDe;
+import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.hive.mapreduce.PhoenixInputFormat;
+import org.apache.phoenix.hive.mapreduce.PhoenixOutputFormat;
+import org.apache.phoenix.hive.ppd.PhoenixPredicateDecomposer;
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+
+/**
+ * Manages the initial configuration of Phoenix-backed Hive tables and selects the SerDe and
+ * input/output formats.
+ */
+@SuppressWarnings("deprecation")
+public class PhoenixStorageHandler extends DefaultStorageHandler implements
+        HiveStoragePredicateHandler, InputEstimator {
+
+    private Configuration jobConf;
+    private Configuration hbaseConf;
+
+    @Override
+    public void setConf(Configuration conf) {
+        jobConf = conf;
+        hbaseConf = HBaseConfiguration.create(conf);
+    }
+
+    @Override
+    public Configuration getConf() {
+        return hbaseConf;
+    }
+
+    private static final Log LOG = LogFactory.getLog(PhoenixStorageHandler.class);
+
+    public PhoenixStorageHandler() {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("PhoenixStorageHandler created");
+        }
+    }
+
+    @Override
+    public HiveMetaHook getMetaHook() {
+        return new PhoenixMetaHook();
+    }
+
+    @Override
+    public void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
+        try {
+            TableMapReduceUtil.addDependencyJars(jobConf);
+            org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(jobConf,
+                    PhoenixStorageHandler.class);
+            JobConf hbaseJobConf = new JobConf(getConf());
+            org.apache.hadoop.hbase.mapred.TableMapReduceUtil.initCredentials(hbaseJobConf);
+            ShimLoader.getHadoopShims().mergeCredentials(jobConf, hbaseJobConf);
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    public Class<? extends OutputFormat> getOutputFormatClass() {
+        return PhoenixOutputFormat.class;
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    public Class<? extends InputFormat> getInputFormatClass() {
+        return PhoenixInputFormat.class;
+    }
+
+    @Override
+    public void configureInputJobProperties(TableDesc tableDesc, Map<String, String>
+            jobProperties) {
+        configureJobProperties(tableDesc, jobProperties);
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Configuring input job for table : " + tableDesc.getTableName());
+        }
+
+        // Initialization efficiency: tell the SerDe whether it is used for input or output work.
+        tableDesc.getProperties().setProperty(PhoenixStorageHandlerConstants.IN_OUT_WORK,
+                PhoenixStorageHandlerConstants.IN_WORK);
+    }
+
+    @Override
+    public void configureOutputJobProperties(TableDesc tableDesc, Map<String, String>
+            jobProperties) {
+        configureJobProperties(tableDesc, jobProperties);
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Configuring output job for table : " + tableDesc.getTableName());
+        }
+
+        // Initialization efficiency: tell the SerDe whether it is used for input or output work.
+        tableDesc.getProperties().setProperty(PhoenixStorageHandlerConstants.IN_OUT_WORK,
+                PhoenixStorageHandlerConstants.OUT_WORK);
+    }
+
+    @Override
+    public void configureTableJobProperties(TableDesc tableDesc, Map<String, String>
+            jobProperties) {
+        configureJobProperties(tableDesc, jobProperties);
+    }
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    protected void configureJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
+        Properties tableProperties = tableDesc.getProperties();
+
+        String inputFormatClassName =
+                tableProperties.getProperty(PhoenixStorageHandlerConstants
+                        .HBASE_INPUT_FORMAT_CLASS);
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug(PhoenixStorageHandlerConstants.HBASE_INPUT_FORMAT_CLASS + " is " +
+                    inputFormatClassName);
+        }
+
+        Class<?> inputFormatClass;
+        try {
+            if (inputFormatClassName != null) {
+                inputFormatClass = JavaUtils.loadClass(inputFormatClassName);
+            } else {
+                inputFormatClass = PhoenixInputFormat.class;
+            }
+        } catch (Exception e) {
+            LOG.error(e.getMessage(), e);
+            throw new RuntimeException(e);
+        }
+
+        if (inputFormatClass != null) {
+            tableDesc.setInputFileFormatClass((Class<? extends InputFormat>) inputFormatClass);
+        }
+
+        String tableName = tableProperties.getProperty(PhoenixStorageHandlerConstants
+                .PHOENIX_TABLE_NAME);
+        if (tableName == null) {
+            tableName = tableDesc.getTableName();
+            tableProperties.setProperty(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME,
+                    tableName);
+        }
+        SessionState sessionState = SessionState.get();
+
+        String sessionId;
+        if (sessionState != null) {
+            sessionId = sessionState.getSessionId();
+        } else {
+            sessionId = UUID.randomUUID().toString();
+        }
+        jobProperties.put(PhoenixConfigurationUtil.SESSION_ID, sessionId);
+        jobProperties.put(PhoenixConfigurationUtil.INPUT_TABLE_NAME, tableName);
+        jobProperties.put(PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM, tableProperties
+                .getProperty(PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM,
+                        PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_QUORUM));
+        jobProperties.put(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT, tableProperties
+                .getProperty(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT, String.valueOf
+                        (PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_PORT)));
+        jobProperties.put(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT, tableProperties
+                .getProperty(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT,
+                        PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_PARENT));
+        String columnMapping = tableProperties
+                .getProperty(PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING);
+        if(columnMapping != null) {
+            jobProperties.put(PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING, columnMapping);
+        }
+
+        jobProperties.put(hive_metastoreConstants.META_TABLE_STORAGE, this.getClass().getName());
+
+        // Set the standard HBase client keys for code that talks to HBase directly.
+        jobProperties.put(HConstants.ZOOKEEPER_QUORUM, jobProperties.get
+                (PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM));
+        jobProperties.put(HConstants.ZOOKEEPER_CLIENT_PORT, jobProperties.get
+                (PhoenixStorageHandlerConstants.ZOOKEEPER_PORT));
+        jobProperties.put(HConstants.ZOOKEEPER_ZNODE_PARENT, jobProperties.get
+                (PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT));
+        addHBaseResources(jobConf, jobProperties);
+    }
+
+    /**
+     * Utility method to add hbase-default.xml and hbase-site.xml properties to a new map
+     * if they are not already present in the jobConf.
+     * @param jobConf Job configuration
+     * @param newJobProperties  Map to which new properties should be added
+     */
+    private void addHBaseResources(Configuration jobConf,
+                                   Map<String, String> newJobProperties) {
+        Configuration conf = new Configuration(false);
+        HBaseConfiguration.addHbaseResources(conf);
+        for (Map.Entry<String, String> entry : conf) {
+            if (jobConf.get(entry.getKey()) == null) {
+                newJobProperties.put(entry.getKey(), entry.getValue());
+            }
+        }
+    }
+
+    @Override
+    public Class<? extends AbstractSerDe> getSerDeClass() {
+        return PhoenixSerDe.class;
+    }
+
+    @Override
+    public DecomposedPredicate decomposePredicate(JobConf jobConf, Deserializer deserializer,
+                                                  ExprNodeDesc predicate) {
+        PhoenixSerDe phoenixSerDe = (PhoenixSerDe) deserializer;
+        List<String> columnNameList = phoenixSerDe.getSerdeParams().getColumnNames();
+
+        return PhoenixPredicateDecomposer.create(columnNameList).decomposePredicate(predicate);
+    }
+
+    @Override
+    public Estimation estimate(JobConf job, TableScanOperator ts, long remaining) throws
+            HiveException {
+        String hiveTableName = ts.getConf().getTableMetadata().getTableName();
+        int reducerCount = job.getInt(hiveTableName + PhoenixStorageHandlerConstants
+                .PHOENIX_REDUCER_NUMBER, 1);
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Estimating input size for table: " + hiveTableName + " with reducer count " +
+                    reducerCount + ". Remaining : " + remaining);
+        }
+
+        long bytesPerReducer = job.getLong(HiveConf.ConfVars.BYTESPERREDUCER.varname,
+                Long.parseLong(HiveConf.ConfVars.BYTESPERREDUCER.getDefaultValue()));
+        long totalLength = reducerCount * bytesPerReducer;
+
+        return new Estimation(0, totalLength);
+    }
+}
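
The handler is driven almost entirely by table properties. A sketch of the keys it (and the serializer above) consult, with purely illustrative values; the key names are the constants defined in PhoenixStorageHandlerConstants, and defaults are applied whenever a key is absent:

    import java.util.HashMap;
    import java.util.Map;

    public class PhoenixTablePropertiesSketch {
        public static void main(String[] args) {
            Map<String, String> tblProps = new HashMap<>();
            tblProps.put("phoenix.table.name", "TEST_TABLE");          // defaults to the Hive table name
            tblProps.put("phoenix.rowkeys", "id");                     // primary key column(s)
            tblProps.put("phoenix.zookeeper.quorum", "zk1,zk2,zk3");   // default: localhost
            tblProps.put("phoenix.zookeeper.client.port", "2181");     // default: 2181
            tblProps.put("phoenix.zookeeper.znode.parent", "/hbase");  // default: /hbase
            tblProps.put("phoenix.column.mapping", "id:ID,name:NAME"); // optional; mapping format is illustrative
            tblProps.forEach((k, v) -> System.out.println(k + " = " + v));
        }
    }
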
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PrimaryKeyData.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PrimaryKeyData.java
new file mode 100644
index 0000000..7773997
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/PrimaryKeyData.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InvalidClassException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamClass;
+import java.io.OutputStream;
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Wrapper around the primary key data for Hive.
+ */
+public class PrimaryKeyData implements Serializable {
+    public static final PrimaryKeyData EMPTY = new PrimaryKeyData(Collections.<String,Object> emptyMap());
+    private static final long serialVersionUID = 1L;
+
+    // Based on https://www.ibm.com/developerworks/library/se-lookahead/. Prevents
+    // deserialization of objects of unexpected classes.
+    private static class LookAheadObjectInputStream extends ObjectInputStream {
+        public LookAheadObjectInputStream(InputStream in) throws IOException {
+            super(in);
+        }
+
+        @Override
+        protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
+            if (!desc.getName().equals(PrimaryKeyData.class.getName()) &&
+                    !desc.getName().startsWith("java.lang.") &&
+                    !desc.getName().startsWith("java.util.") &&
+                    !desc.getName().startsWith("java.sql.")) {
+                throw new InvalidClassException(desc.getName(), "Expected an instance of PrimaryKeyData");
+            }
+            return super.resolveClass(desc);
+        }
+    }
+
+    private final HashMap<String,Object> data;
+
+    public PrimaryKeyData(Map<String,Object> data) {
+        if (data instanceof HashMap) {
+            this.data = (HashMap<String,Object>) data;
+        } else {
+            this.data = new HashMap<>(Objects.requireNonNull(data));
+        }
+    }
+
+    public HashMap<String,Object> getData() {
+        return data;
+    }
+
+    public void serialize(OutputStream output) throws IOException {
+        try (ObjectOutputStream oos = new ObjectOutputStream(output)) {
+            oos.writeObject(this);
+            oos.flush();
+        }
+    }
+
+    public static PrimaryKeyData deserialize(InputStream input) throws IOException, ClassNotFoundException {
+        try (LookAheadObjectInputStream ois = new LookAheadObjectInputStream(input)) {
+            Object obj = ois.readObject();
+            if (obj instanceof PrimaryKeyData) {
+                return (PrimaryKeyData) obj;
+            }
+            throw new InvalidClassException(obj == null ? "null" : obj.getClass().getName(), "Disallowed serialized class");
+        }
+    }
+}
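
A round-trip sketch of the look-ahead deserialization (illustrative only): a serialized PrimaryKeyData instance is accepted, while any other serialized class would be rejected in resolveClass() with an InvalidClassException.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.util.Collections;

    import org.apache.phoenix.hive.PrimaryKeyData;

    public class PrimaryKeyDataSketch {
        public static void main(String[] args) throws Exception {
            PrimaryKeyData pk =
                    new PrimaryKeyData(Collections.<String, Object>singletonMap("ID", 1L));

            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            pk.serialize(bos);

            PrimaryKeyData copy =
                    PrimaryKeyData.deserialize(new ByteArrayInputStream(bos.toByteArray()));
            System.out.println(copy.getData());   // {ID=1}
        }
    }
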
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java
new file mode 100644
index 0000000..1e36413
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.constants;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.io.IntWritable;
+
+import java.util.List;
+
+/**
+ * Constants used by the Phoenix storage handler for Hive.
+ */
+public class PhoenixStorageHandlerConstants {
+
+    public static final String HBASE_INPUT_FORMAT_CLASS = "phoenix.input.format.class";
+
+    public static final String PHOENIX_TABLE_NAME = "phoenix.table.name";
+
+    public static final String DEFAULT_PHOENIX_INPUT_CLASS = "org.apache.phoenix.hive.mapreduce" +
+            ".PhoenixResultWritable";
+
+    public static final String ZOOKEEPER_QUORUM = "phoenix.zookeeper.quorum";
+    public static final String ZOOKEEPER_PORT = "phoenix.zookeeper.client.port";
+    public static final String ZOOKEEPER_PARENT = "phoenix.zookeeper.znode.parent";
+    public static final String DEFAULT_ZOOKEEPER_QUORUM = "localhost";
+    public static final int DEFAULT_ZOOKEEPER_PORT = 2181;
+    public static final String DEFAULT_ZOOKEEPER_PARENT = "/hbase";
+
+    public static final String PHOENIX_ROWKEYS = "phoenix.rowkeys";
+    public static final String PHOENIX_COLUMN_MAPPING = "phoenix.column.mapping";
+    public static final String PHOENIX_TABLE_OPTIONS = "phoenix.table.options";
+
+    public static final String PHOENIX_TABLE_QUERY_HINT = ".query.hint";
+    public static final String PHOENIX_REDUCER_NUMBER = ".reducer.count";
+    public static final String DISABLE_WAL = ".disable.wal";
+    public static final String BATCH_MODE = "batch.mode";
+    public static final String AUTO_FLUSH = ".auto.flush";
+
+    public static final String COLON = ":";
+    public static final String COMMA = ",";
+    public static final String EMPTY_STRING = "";
+    public static final String SPACE = " ";
+    public static final String LEFT_ROUND_BRACKET = "(";
+    public static final String RIGHT_ROUND_BRACKET = ")";
+    public static final String QUOTATION_MARK = "'";
+    public static final String EQUAL = "=";
+    public static final String IS = "is";
+    public static final String QUESTION = "?";
+
+    public static final String SPLIT_BY_STATS = "split.by.stats";
+    public static final String HBASE_SCAN_CACHE = "hbase.scan.cache";
+    public static final String HBASE_SCAN_CACHEBLOCKS = "hbase.scan.cacheblock";
+    public static final String HBASE_DATE_FORMAT = "hbase.date.format";
+    public static final String HBASE_TIMESTAMP_FORMAT = "hbase.timestamp.format";
+    public static final String DEFAULT_DATE_FORMAT = "yyyy-MM-dd";
+    public static final String DEFAULT_TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss.SSS";
+
+    public static final String IN_OUT_WORK = "in.out.work";
+    public static final String IN_WORK = "input";
+    public static final String OUT_WORK = "output";
+
+    public static final String MR = "mr";
+    public static final String TEZ = "tez";
+    public static final String SPARK = "spark";
+
+    public static final String DATE_TYPE = "date";
+    public static final String TIMESTAMP_TYPE = "timestamp";
+    public static final String BETWEEN_COMPARATOR = "between";
+    public static final String IN_COMPARATOR = "in";
+    public static final List<String> COMMON_COMPARATOR = Lists.newArrayList("=", "<", ">", "<=",
+            ">=");
+
+    // date/timestamp
+    public static final String COLUMNE_MARKER = "$columnName$";
+    public static final String PATERN_MARKER = "$targetPattern$";
+    public static final String DATE_PATTERN = "'?\\d{4}-\\d{2}-\\d{2}'?";
+    public static final String TIMESTAMP_PATTERN = "'?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\\" +
+            ".?\\d{0,9}'?";
+    public static final String COMMON_OPERATOR_PATTERN = "(\\(?\"?" + COLUMNE_MARKER + "\"?\\)?\\s*" +
+            "(=|>|<|<=|>=)\\s*(" + PATERN_MARKER + "))";
+    public static final String BETWEEN_OPERATOR_PATTERN = "(\\(?\"?" + COLUMNE_MARKER + "\"?\\)?\\s*(" +
+            "(?i)not)?\\s*(?i)between\\s*(" + PATERN_MARKER + ")\\s*(?i)and\\s*(" + PATERN_MARKER
+            + "))";
+    public static final String IN_OPERATOR_PATTERN = "(\\(?\"?" + COLUMNE_MARKER + "\"?\\)?\\s*((?i)" +
+            "not)?\\s*(?i)in\\s*\\((" + PATERN_MARKER + ",?\\s*)+\\))";
+
+    public static final String FUNCTION_VALUE_MARKER = "$value$";
+    public static final String DATE_FUNCTION_TEMPLETE = "to_date(" + FUNCTION_VALUE_MARKER + ")";
+    public static final String TIMESTAMP_FUNCTION_TEMPLATE = "TIMESTAMP" +
+            FUNCTION_VALUE_MARKER;
+
+    public static final IntWritable INT_ZERO = new IntWritable(0);
+}
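
The $columnName$ and $targetPattern$ markers are placeholders that get substituted into the operator patterns when date/timestamp literals are rewritten for predicate push-down (the substitution itself lives in the query-builder code, which is not shown here). A small illustrative sketch of how such a pattern matches once the markers are filled in:

    import java.util.regex.Pattern;

    import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;

    public class OperatorPatternSketch {
        public static void main(String[] args) {
            // Concrete regex for a hypothetical "create_date" column compared to a DATE literal
            String regex = PhoenixStorageHandlerConstants.COMMON_OPERATOR_PATTERN
                    .replace(PhoenixStorageHandlerConstants.COLUMNE_MARKER, "create_date")
                    .replace(PhoenixStorageHandlerConstants.PATERN_MARKER,
                            PhoenixStorageHandlerConstants.DATE_PATTERN);

            // true: matches the common-operator form <column> = <date literal>
            System.out.println(Pattern.compile(regex).matcher("create_date = '2020-01-01'").find());
        }
    }
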
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
new file mode 100644
index 0000000..eac55d4
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.mapreduce;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.TableScanDesc;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.hive.ppd.PhoenixPredicateDecomposer;
+import org.apache.phoenix.hive.ql.index.IndexSearchCondition;
+import org.apache.phoenix.hive.query.PhoenixQueryBuilder;
+import org.apache.phoenix.hive.util.PhoenixConnectionUtil;
+import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
+import org.apache.phoenix.iterate.MapReduceParallelScanGrouper;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.util.PhoenixRuntime;
+
+/**
+ * Custom InputFormat to feed into Hive
+ */
+@SuppressWarnings({"deprecation", "rawtypes"})
+public class PhoenixInputFormat<T extends DBWritable> implements InputFormat<WritableComparable,
+        T> {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixInputFormat.class);
+
+    public PhoenixInputFormat() {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("PhoenixInputFormat created");
+        }
+    }
+
+    @Override
+    public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
+        String tableName = jobConf.get(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME);
+
+        String query;
+        String executionEngine = jobConf.get(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname,
+                HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.getDefaultValue());
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Target table name at split phase : " + tableName + " with whereCondition : " +
+                    jobConf.get(TableScanDesc.FILTER_TEXT_CONF_STR) +
+                    " and " + HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname + " : " +
+                    executionEngine);
+        }
+
+        List<IndexSearchCondition> conditionList = null;
+        String filterExprSerialized = jobConf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
+        if (filterExprSerialized != null) {
+            ExprNodeGenericFuncDesc filterExpr =
+                    SerializationUtilities.deserializeExpression(filterExprSerialized);
+            PhoenixPredicateDecomposer predicateDecomposer =
+                    PhoenixPredicateDecomposer
+                      .create(Arrays.asList(jobConf.get(serdeConstants.LIST_COLUMNS).split(",")));
+            predicateDecomposer.decomposePredicate(filterExpr);
+            if (predicateDecomposer.isCalledPPD()) {
+                conditionList = predicateDecomposer.getSearchConditionList();
+            }
+        }
+
+        query = PhoenixQueryBuilder.getInstance().buildQuery(jobConf, tableName,
+                PhoenixStorageHandlerUtil.getReadColumnNames(jobConf), conditionList);
+
+        final QueryPlan queryPlan = getQueryPlan(jobConf, query);
+        final List<KeyRange> allSplits = queryPlan.getSplits();
+        final List<InputSplit> splits = generateSplits(jobConf, queryPlan, allSplits, query);
+
+        return splits.toArray(new InputSplit[splits.size()]);
+    }
+
+    private List<InputSplit> generateSplits(final JobConf jobConf, final QueryPlan qplan,
+                                            final List<KeyRange> splits, String query) throws
+            IOException {
+        if (qplan == null) {
+            throw new NullPointerException();
+        }
+        if (splits == null) {
+            throw new NullPointerException();
+        }
+        final List<InputSplit> psplits = new ArrayList<>(splits.size());
+
+        Path[] tablePaths = FileInputFormat.getInputPaths(ShimLoader.getHadoopShims()
+                .newJobContext(new Job(jobConf)));
+        boolean splitByStats = jobConf.getBoolean(PhoenixStorageHandlerConstants.SPLIT_BY_STATS,
+                false);
+
+        setScanCacheSize(jobConf);
+
+        // Look up region locations and sizes so splits can be placed with data locality
+        try (org.apache.hadoop.hbase.client.Connection connection = ConnectionFactory.createConnection(PhoenixConnectionUtil.getConfiguration(jobConf))) {
+        RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(qplan
+                .getTableRef().getTable().getPhysicalName().toString()));
+        RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(regionLocator, connection
+                .getAdmin());
+
+        for (List<Scan> scans : qplan.getScans()) {
+            PhoenixInputSplit inputSplit;
+
+            HRegionLocation location = regionLocator.getRegionLocation(scans.get(0).getStartRow()
+                    , false);
+            long regionSize = sizeCalculator.getRegionSize(location.getRegionInfo().getRegionName
+                    ());
+            String regionLocation = PhoenixStorageHandlerUtil.getRegionLocation(location, LOG);
+
+            if (splitByStats) {
+                for (Scan aScan : scans) {
+                    if (LOG.isDebugEnabled()) {
+                        LOG.debug("Split for  scan : " + aScan + "with scanAttribute : " + aScan
+                                .getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : [" +
+                                aScan.getCaching() + ", " + aScan.getCacheBlocks() + ", " + aScan
+                                .getBatch() + "] and  regionLocation : " + regionLocation);
+                    }
+
+                    inputSplit = new PhoenixInputSplit(new ArrayList<>(Arrays.asList(aScan)), tablePaths[0],
+                            regionLocation, regionSize);
+                    inputSplit.setQuery(query);
+                    psplits.add(inputSplit);
+                }
+            } else {
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("Scan count[" + scans.size() + "] : " + Bytes.toStringBinary(scans
+                            .get(0).getStartRow()) + " ~ " + Bytes.toStringBinary(scans.get(scans
+                            .size() - 1).getStopRow()));
+                    LOG.debug("First scan : " + scans.get(0) + "with scanAttribute : " + scans
+                            .get(0).getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : " +
+                            "[" + scans.get(0).getCaching() + ", " + scans.get(0).getCacheBlocks()
+                            + ", " + scans.get(0).getBatch() + "] and  regionLocation : " +
+                            regionLocation);
+
+                    for (int i = 0, limit = scans.size(); i < limit; i++) {
+                        LOG.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " + Bytes
+                                .toStringBinary(scans.get(i).getAttribute
+                                        (BaseScannerRegionObserver.EXPECTED_UPPER_REGION_KEY)));
+                    }
+                }
+
+                inputSplit = new PhoenixInputSplit(scans, tablePaths[0], regionLocation,
+                        regionSize);
+                inputSplit.setQuery(query);
+                psplits.add(inputSplit);
+            }
+        }
+        }
+
+        return psplits;
+    }
+
+    private void setScanCacheSize(JobConf jobConf) {
+        int scanCacheSize = jobConf.getInt(PhoenixStorageHandlerConstants.HBASE_SCAN_CACHE, -1);
+        if (scanCacheSize > 0) {
+            jobConf.setInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, scanCacheSize);
+        }
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Generating splits with scanCacheSize : " + scanCacheSize);
+        }
+    }
+
+    @Override
+    public RecordReader<WritableComparable, T> getRecordReader(InputSplit split, JobConf job,
+                                                               Reporter reporter) throws
+            IOException {
+        final QueryPlan queryPlan = getQueryPlan(job, ((PhoenixInputSplit) split).getQuery());
+        @SuppressWarnings("unchecked")
+        final Class<T> inputClass = (Class<T>) job.getClass(PhoenixConfigurationUtil.INPUT_CLASS,
+                PhoenixResultWritable.class);
+
+        PhoenixRecordReader<T> recordReader = new PhoenixRecordReader<T>(inputClass, job,
+                queryPlan);
+        recordReader.initialize(split);
+
+        return recordReader;
+    }
+
+    /**
+     * Returns the query plan associated with the select query.
+     */
+    private QueryPlan getQueryPlan(final Configuration configuration, String selectStatement)
+            throws IOException {
+        try {
+            final String currentScnValue = configuration.get(PhoenixConfigurationUtil
+                    .CURRENT_SCN_VALUE);
+            final Properties overridingProps = new Properties();
+            if (currentScnValue != null) {
+                overridingProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, currentScnValue);
+            }
+            final Connection connection = PhoenixConnectionUtil.getInputConnection(configuration,
+                    overridingProps);
+            if (selectStatement == null) {
+                throw new NullPointerException();
+            }
+            final Statement statement = connection.createStatement();
+            final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
+
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("Compiled query : " + selectStatement);
+            }
+
+            // Optimize the query plan so that we potentially use secondary indexes
+            final QueryPlan queryPlan = pstmt.optimizeQuery(selectStatement);
+            // Initialize the query plan so it sets up the parallel scans
+            queryPlan.iterator(MapReduceParallelScanGrouper.getInstance());
+            return queryPlan;
+        } catch (Exception exception) {
+            LOG.error(String.format("Failed to get the query plan with error [%s]", exception.getMessage()));
+            throw new RuntimeException(exception);
+        }
+    }
+}
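
getSplits() is driven by JobConf entries set by the storage handler and by Hive's table scan descriptor. A sketch of the keys it consults, with illustrative values (the pushed-down filter expression is normally serialized into the conf by Hive, not set by hand):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.serde.serdeConstants;
    import org.apache.hadoop.mapred.JobConf;

    public class PhoenixInputFormatConfSketch {
        public static void main(String[] args) {
            JobConf jobConf = new JobConf();
            jobConf.set("phoenix.table.name", "TEST_TABLE");              // target Phoenix table
            jobConf.set(serdeConstants.LIST_COLUMNS, "id,name");          // used to decompose the filter
            jobConf.set(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, "tez");
            jobConf.setBoolean("split.by.stats", true);                   // one split per scan instead of per region
            jobConf.setInt("hbase.scan.cache", 1000);                     // copied to hbase.client.scanner.caching
            System.out.println(jobConf.get("phoenix.table.name"));
        }
    }
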
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputSplit.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputSplit.java
new file mode 100644
index 0000000..39e8744
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputSplit.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.mapreduce;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.mapred.FileSplit;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.phoenix.query.KeyRange;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * InputSplit implementation. Represents the data to be processed by an individual Mapper
+ */
+public class PhoenixInputSplit extends FileSplit implements InputSplit {
+
+    private List<Scan> scans;
+    private KeyRange keyRange;
+
+    private long regionSize;
+
+    // The query is carried in the split because it is not delivered via the JobConf.
+    private String query;
+
+    public PhoenixInputSplit() {
+    }
+
+    public PhoenixInputSplit(final List<Scan> scans, Path dummyPath, String regionLocation, long
+            length) {
+        super(dummyPath, 0, 0, new String[]{regionLocation});
+
+        regionSize = length;
+
+        Preconditions.checkNotNull(scans);
+        Preconditions.checkState(!scans.isEmpty());
+        this.scans = scans;
+        init();
+    }
+
+    public List<Scan> getScans() {
+        return scans;
+    }
+
+    public KeyRange getKeyRange() {
+        return keyRange;
+    }
+
+    public String getQuery() {
+        return query;
+    }
+
+    public void setQuery(String query) {
+        this.query = query;
+    }
+
+    private void init() {
+        this.keyRange = KeyRange.getKeyRange(scans.get(0).getStartRow(), scans.get(scans.size() -
+                1).getStopRow());
+    }
+
+    @Override
+    public void write(DataOutput out) throws IOException {
+        super.write(out);
+
+        Preconditions.checkNotNull(scans);
+        WritableUtils.writeVInt(out, scans.size());
+        for (Scan scan : scans) {
+            ClientProtos.Scan protoScan = ProtobufUtil.toScan(scan);
+            byte[] protoScanBytes = protoScan.toByteArray();
+            WritableUtils.writeVInt(out, protoScanBytes.length);
+            out.write(protoScanBytes);
+        }
+
+        WritableUtils.writeString(out, query);
+        WritableUtils.writeVLong(out, regionSize);
+    }
+
+    @Override
+    public void readFields(DataInput in) throws IOException {
+        super.readFields(in);
+
+        int count = WritableUtils.readVInt(in);
+        scans = new ArrayList<>(count);
+        for (int i = 0; i < count; i++) {
+            byte[] protoScanBytes = new byte[WritableUtils.readVInt(in)];
+            in.readFully(protoScanBytes);
+            ClientProtos.Scan protoScan = ClientProtos.Scan.parseFrom(protoScanBytes);
+            Scan scan = ProtobufUtil.toScan(protoScan);
+            scans.add(scan);
+        }
+        init();
+
+        query = WritableUtils.readString(in);
+        regionSize = WritableUtils.readVLong(in);
+    }
+
+    @Override
+    public long getLength() {
+        return regionSize;
+    }
+
+    @Override
+    public String[] getLocations() throws IOException {
+        return new String[]{};
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + keyRange.hashCode();
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null) {
+            return false;
+        }
+        if (!(obj instanceof PhoenixInputSplit)) {
+            return false;
+        }
+        PhoenixInputSplit other = (PhoenixInputSplit) obj;
+        if (keyRange == null) {
+            if (other.keyRange != null) {
+                return false;
+            }
+        } else if (!keyRange.equals(other.keyRange)) {
+            return false;
+        }
+        return true;
+    }
+}
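
A write()/readFields() round-trip sketch (illustrative only): the path, region location and query are hypothetical, and an HBase 2.x client is assumed for Scan.withStartRow/withStopRow.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.util.Arrays;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.hive.mapreduce.PhoenixInputSplit;

    public class PhoenixInputSplitSketch {
        public static void main(String[] args) throws Exception {
            Scan scan = new Scan()
                    .withStartRow(Bytes.toBytes("a"))
                    .withStopRow(Bytes.toBytes("z"));

            PhoenixInputSplit split = new PhoenixInputSplit(
                    Arrays.asList(scan), new Path("/tmp/dummy"), "rs1.example.com", 1024L);
            split.setQuery("SELECT ID, NAME FROM TEST_TABLE");

            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            split.write(new DataOutputStream(bos));

            PhoenixInputSplit copy = new PhoenixInputSplit();
            copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));

            // The query, key range and region size survive the round trip
            System.out.println(copy.getQuery() + " / " + copy.getKeyRange() + " / " + copy.getLength());
        }
    }
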
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixOutputFormat.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixOutputFormat.java
new file mode 100644
index 0000000..ed47176
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixOutputFormat.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.mapreduce;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
+import org.apache.hadoop.hive.ql.io.RecordUpdater;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hadoop.mapred.RecordWriter;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.apache.hadoop.util.Progressable;
+import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
+
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.Properties;
+
+/**
+ * Custom OutputFormat to feed into Hive. Describes the output-specification for a Map-Reduce job.
+ */
+public class PhoenixOutputFormat<T extends DBWritable> implements OutputFormat<NullWritable, T>,
+        AcidOutputFormat<NullWritable, T> {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixOutputFormat.class);
+
+    public PhoenixOutputFormat() {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("PhoenixOutputFormat created");
+        }
+    }
+
+    @Override
+    public RecordWriter<NullWritable, T> getRecordWriter(FileSystem ignored, JobConf jobConf,
+                                                         String name, Progressable progress)
+            throws IOException {
+        return createRecordWriter(jobConf, new Properties());
+    }
+
+    @Override
+    public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException {
+
+    }
+
+    @Override
+    public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter
+            (JobConf jobConf, Path finalOutPath, Class<? extends Writable> valueClass, boolean
+                    isCompressed, Properties tableProperties, Progressable progress) throws
+            IOException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Get RecordWriter for finalOutPath : " + finalOutPath + ", valueClass : " +
+                    valueClass.getName() + ", isCompressed : " + isCompressed +
+                    ", tableProperties : " + tableProperties + ", progress : " + progress);
+        }
+
+        return createRecordWriter(jobConf, new Properties());
+    }
+
+    @Override
+    public RecordUpdater getRecordUpdater(Path path, AcidOutputFormat.Options options)
+            throws IOException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Get RecordUpdater for path : " + path + ", options : " +
+                    PhoenixStorageHandlerUtil.getOptionsValue(options));
+        }
+        return new PhoenixRecordWriter<T>(path, options);
+    }
+
+    @Override
+    public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getRawRecordWriter(Path path,
+            org.apache.hadoop.hive.ql.io.AcidOutputFormat.Options options) throws IOException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Get RawRecordWriter for path : " + path + ", options : " +
+                    PhoenixStorageHandlerUtil.getOptionsValue(options));
+        }
+
+        return new PhoenixRecordWriter<T>(path, options);
+    }
+
+    private PhoenixRecordWriter<T> createRecordWriter(Configuration config, Properties properties) {
+        try {
+            return new PhoenixRecordWriter<T>(config, properties);
+        } catch (SQLException e) {
+            LOG.error("Error during PhoenixRecordWriter instantiation :" + e.getMessage());
+            throw new RuntimeException(e);
+        }
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixRecordReader.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixRecordReader.java
new file mode 100644
index 0000000..3b0dadf
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixRecordReader.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.mapreduce;
+
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
+import org.apache.phoenix.hive.PhoenixRowKey;
+import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
+import org.apache.phoenix.iterate.ConcatResultIterator;
+import org.apache.phoenix.iterate.LookAheadResultIterator;
+import org.apache.phoenix.iterate.MapReduceParallelScanGrouper;
+import org.apache.phoenix.iterate.PeekingResultIterator;
+import org.apache.phoenix.iterate.ResultIterator;
+import org.apache.phoenix.iterate.RoundRobinResultIterator;
+import org.apache.phoenix.iterate.SequenceResultIterator;
+import org.apache.phoenix.iterate.TableResultIterator;
+import org.apache.phoenix.jdbc.PhoenixResultSet;
+import org.apache.phoenix.monitoring.ReadMetricQueue;
+import org.apache.phoenix.monitoring.ScanMetricsHolder;
+
+import com.google.common.base.Throwables;
+
+/**
+ * RecordReader implementation that iterates over the records.
+ */
+@SuppressWarnings("rawtypes")
+public class PhoenixRecordReader<T extends DBWritable> implements
+        RecordReader<WritableComparable, T> {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixRecordReader.class);
+
+    private final Configuration configuration;
+    private final QueryPlan queryPlan;
+    private WritableComparable key;
+    private T value = null;
+    private Class<T> inputClass;
+    private ResultIterator resultIterator = null;
+    private PhoenixResultSet resultSet;
+    private long readCount;
+
+
+    private boolean isTransactional;
+
+    public PhoenixRecordReader(Class<T> inputClass, final Configuration configuration,
+            final QueryPlan queryPlan) throws IOException {
+        this.inputClass = inputClass;
+        this.configuration = configuration;
+        this.queryPlan = queryPlan;
+
+        isTransactional = PhoenixStorageHandlerUtil.isTransactionalTable(configuration);
+    }
+
+    public void initialize(InputSplit split) throws IOException {
+        final PhoenixInputSplit pSplit = (PhoenixInputSplit) split;
+        final List<Scan> scans = pSplit.getScans();
+
+        if (LOG.isInfoEnabled()) {
+            LOG.info("Target table : " + queryPlan.getTableRef().getTable().getPhysicalName());
+        }
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Scan count[" + scans.size() + "] : " + Bytes.toStringBinary(scans.get(0)
+                    .getStartRow()) + " ~ " + Bytes.toStringBinary(scans.get(scans.size() - 1)
+                    .getStopRow()));
+            LOG.debug("First scan : " + scans.get(0) + " scanAttribute : " + scans.get(0)
+                    .getAttributesMap());
+
+            for (int i = 0, limit = scans.size(); i < limit; i++) {
+                LOG.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " +
+                        Bytes.toStringBinary(scans.get(i).getAttribute(BaseScannerRegionObserver
+                                .EXPECTED_UPPER_REGION_KEY)));
+            }
+        }
+
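+        // Build one TableResultIterator per scan of this split, then combine them either
+        // round-robin (when the query plan allows it) or by simple concatenation.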
+        try {
+            List<PeekingResultIterator> iterators = new ArrayList<>(scans.size());
+            StatementContext ctx = queryPlan.getContext();
+            ReadMetricQueue readMetrics = ctx.getReadMetricsQueue();
+            String tableName = queryPlan.getTableRef().getTable().getPhysicalName().getString();
+            long renewScannerLeaseThreshold = queryPlan.getContext().getConnection()
+                    .getQueryServices().getRenewLeaseThresholdMilliSeconds();
+            for (Scan scan : scans) {
+                scan.setAttribute(BaseScannerRegionObserver.SKIP_REGION_BOUNDARY_CHECK, Bytes
+                        .toBytes(true));
+                ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, scan, ctx.getConnection().getLogLevel());
+                final TableResultIterator tableResultIterator = new TableResultIterator(
+                        queryPlan.getContext().getConnection().getMutationState(), scan, scanMetricsHolder,
+                        renewScannerLeaseThreshold, queryPlan, MapReduceParallelScanGrouper.getInstance());
+
+                PeekingResultIterator peekingResultIterator =
+                        LookAheadResultIterator.wrap(tableResultIterator);
+                iterators.add(peekingResultIterator);
+            }
+            ResultIterator iterator = queryPlan.useRoundRobinIterator()
+                    ? RoundRobinResultIterator.newIterator(iterators, queryPlan)
+                    : ConcatResultIterator.newIterator(iterators);
+            if (queryPlan.getContext().getSequenceManager().getSequenceCount() > 0) {
+                iterator = new SequenceResultIterator(iterator, queryPlan.getContext()
+                        .getSequenceManager());
+            }
+            this.resultIterator = iterator;
+            // Clone the row projector as it's not thread safe and would be used
+            // simultaneously by multiple threads otherwise.
+            this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector()
+                    .cloneIfNecessary(),
+                    queryPlan.getContext());
+        } catch (SQLException e) {
+            LOG.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",
+                    e.getMessage()));
+            Throwables.propagate(e);
+        }
+    }
+
+    @Override
+    public boolean next(WritableComparable key, T value) throws IOException {
+        try {
+            if (!resultSet.next()) {
+                return false;
+            }
+            value.readFields(resultSet);
+
+            if (isTransactional) {
+                ((PhoenixResultWritable) value).readPrimaryKey((PhoenixRowKey) key);
+            }
+
+            ++readCount;
+
+            if (LOG.isTraceEnabled()) {
+                LOG.trace("Result[" + readCount + "] : " + ((PhoenixResultWritable) value)
+                        .getResultMap());
+            }
+
+            return true;
+        } catch (SQLException e) {
+            LOG.error(String.format(" Error [%s] occurred while iterating over the resultset. ",
+                    e.getMessage()));
+            throw new RuntimeException(e);
+        }
+    }
+
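+    // For transactional (ACID) tables the key carries the Phoenix row key so Hive can build
+    // its row__id; for non-transactional tables no key is needed and NullWritable is used.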
+    @Override
+    public WritableComparable createKey() {
+        if (isTransactional) {
+            key = new PhoenixRowKey();
+        } else {
+            key = NullWritable.get();
+        }
+
+        return key;
+    }
+
+    @Override
+    public T createValue() {
+        value = ReflectionUtils.newInstance(inputClass, this.configuration);
+        return value;
+    }
+
+    @Override
+    public long getPos() throws IOException {
+        return 0;
+    }
+
+    @Override
+    public void close() throws IOException {
+        if (LOG.isInfoEnabled()) {
+            LOG.info("Read Count : " + readCount);
+        }
+
+        if (resultIterator != null) {
+            try {
+                resultIterator.close();
+            } catch (SQLException e) {
+                LOG.error(" Error closing resultset.");
+                throw new RuntimeException(e);
+            }
+        }
+
+    }
+
+    @Override
+    public float getProgress() throws IOException {
+        return 0;
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixRecordWriter.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixRecordWriter.java
new file mode 100644
index 0000000..fcced90
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixRecordWriter.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.mapreduce;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
+import org.apache.hadoop.hive.ql.io.RecordUpdater;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.SerDeStats;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapred.RecordWriter;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.apache.phoenix.hive.PhoenixSerializer;
+import org.apache.phoenix.hive.PhoenixSerializer.DmlType;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.hive.util.PhoenixConnectionUtil;
+import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
+import org.apache.phoenix.hive.util.PhoenixUtil;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.schema.ConcurrentTableMutationException;
+import org.apache.phoenix.schema.MetaDataClient;
+import org.apache.phoenix.util.QueryUtil;
+
+/**
+ *
+ * RecordWriter implementation that writes records to the output table.
+ * WARNING: the WAL-disable setting may not work reliably when multiple tasks enable or
+ * disable the WAL concurrently.
+ *
+ */
+public class PhoenixRecordWriter<T extends DBWritable> implements RecordWriter<NullWritable, T>,
+        org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter, RecordUpdater {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixRecordWriter.class);
+
+    private Connection conn;
+    private PreparedStatement pstmt;
+    private long batchSize;
+    private long numRecords = 0;
+
+    private Configuration config;
+    private String tableName;
+    private MetaDataClient metaDataClient;
+    private boolean restoreWalMode;
+
+    // For RecordUpdater
+    private long rowCountDelta = 0;
+    private PhoenixSerializer phoenixSerializer;
+    private ObjectInspector objInspector;
+    private PreparedStatement pstmtForDelete;
+
+    // For RecordUpdater
+    public PhoenixRecordWriter(Path path, AcidOutputFormat.Options options) throws IOException {
+        Configuration config = options.getConfiguration();
+        Properties props = new Properties();
+
+        try {
+            initialize(config, props);
+        } catch (SQLException e) {
+            throw new IOException(e);
+        }
+
+        this.objInspector = options.getInspector();
+        try {
+            phoenixSerializer = new PhoenixSerializer(config, options.getTableProperties());
+        } catch (SerDeException e) {
+            throw new IOException(e);
+        }
+    }
+
+    public PhoenixRecordWriter(final Configuration configuration, final Properties props) throws
+            SQLException {
+        initialize(configuration, props);
+    }
+
+    private void initialize(Configuration config, Properties properties) throws SQLException {
+        this.config = config;
+        tableName = config.get(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME);
+
+        // Disable WAL
+        String walConfigName = tableName.toLowerCase() + PhoenixStorageHandlerConstants.DISABLE_WAL;
+        boolean disableWal = config.getBoolean(walConfigName, false);
+        if (disableWal) {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("Property " + walConfigName + " is true. batch.mode will be set true. ");
+            }
+
+            properties.setProperty(PhoenixStorageHandlerConstants.BATCH_MODE, "true");
+        }
+
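+        // Open the Phoenix JDBC connection used for all upserts and deletes issued by this writer.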
+        this.conn = PhoenixConnectionUtil.getInputConnection(config, properties);
+
+        if (disableWal) {
+            metaDataClient = new MetaDataClient((PhoenixConnection) conn);
+
+            if (!PhoenixUtil.isDisabledWal(metaDataClient, tableName)) {
+                // execute an ALTER TABLE statement if DISABLE_WAL is not already set.
+                try {
+                    PhoenixUtil.alterTableForWalDisable(conn, tableName, true);
+                } catch (ConcurrentTableMutationException e) {
+                    if (LOG.isWarnEnabled()) {
+                        LOG.warn("Another mapper or task processing wal disable");
+                    }
+                }
+
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug(tableName + "'s WAL disabled.");
+                }
+
+                // restore original value of disable_wal at the end.
+                restoreWalMode = true;
+            }
+        }
+
+        this.batchSize = PhoenixConfigurationUtil.getBatchSize(config);
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Batch-size : " + batchSize);
+        }
+
+        String upsertQuery = QueryUtil.constructUpsertStatement(tableName, PhoenixUtil
+                .getColumnInfoList(conn, tableName));
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Upsert-query : " + upsertQuery);
+        }
+        this.pstmt = this.conn.prepareStatement(upsertQuery);
+    }
+
+    @Override
+    public void write(NullWritable key, T record) throws IOException {
+        try {
+            record.write(pstmt);
+            numRecords++;
+            pstmt.executeUpdate();
+
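+            // Upserts accumulate on the Phoenix connection until commit (auto-commit is assumed
+            // to be off); committing every batchSize rows flushes the buffered mutations to HBase.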
+            if (numRecords % batchSize == 0) {
+                LOG.debug("Commit called on a batch of size : " + batchSize);
+                conn.commit();
+            }
+        } catch (SQLException e) {
+            throw new IOException("Exception while writing to table.", e);
+        }
+    }
+
+    @Override
+    public void close(Reporter reporter) throws IOException {
+        try {
+            conn.commit();
+
+            if (LOG.isInfoEnabled()) {
+                LOG.info("Wrote row : " + numRecords);
+            }
+        } catch (SQLException e) {
+            LOG.error("SQLException while performing the commit for the task.");
+            throw new IOException(e);
+        } finally {
+            try {
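+                // If this task disabled the WAL in initialize(), turn it back on before closing.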
+                if (restoreWalMode && PhoenixUtil.isDisabledWal(metaDataClient, tableName)) {
+                    try {
+                        PhoenixUtil.alterTableForWalDisable(conn, tableName, false);
+                    } catch (ConcurrentTableMutationException e) {
+                        if (LOG.isWarnEnabled()) {
+                            LOG.warn("Another mapper or task processing wal enable");
+                        }
+                    }
+
+                    if (LOG.isDebugEnabled()) {
+                        LOG.debug(tableName + "'s WAL enabled.");
+                    }
+                }
+
+                // flush if [table-name].auto.flush is true.
+                String autoFlushConfigName = tableName.toLowerCase() +
+                        PhoenixStorageHandlerConstants.AUTO_FLUSH;
+                boolean autoFlush = config.getBoolean(autoFlushConfigName, false);
+                if (autoFlush) {
+                    if (LOG.isDebugEnabled()) {
+                        LOG.debug("autoFlush is true.");
+                    }
+
+                    PhoenixUtil.flush(conn, tableName);
+                }
+
+                PhoenixUtil.closeResource(pstmt);
+                PhoenixUtil.closeResource(pstmtForDelete);
+                PhoenixUtil.closeResource(conn);
+            } catch (SQLException ex) {
+                LOG.error("SQLException while closing the connection for the task.");
+                throw new IOException(ex);
+            }
+        }
+    }
+
+    // For Testing
+    public boolean isRestoreWalMode() {
+        return restoreWalMode;
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public void write(Writable w) throws IOException {
+        PhoenixResultWritable row = (PhoenixResultWritable) w;
+
+        write(NullWritable.get(), (T) row);
+    }
+
+    @Override
+    public void close(boolean abort) throws IOException {
+        close(Reporter.NULL);
+    }
+
+    @Override
+    public void insert(long currentTransaction, Object row) throws IOException {
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("insert transaction : " + currentTransaction + ", row : " +
+                    PhoenixStorageHandlerUtil.toString(row));
+        }
+
+        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
+                .serialize(row, objInspector, DmlType.INSERT);
+
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Data : " + pResultWritable.getValueList());
+        }
+
+        write(pResultWritable);
+        rowCountDelta++;
+    }
+
+    @Override
+    public void update(long currentTransaction, Object row) throws IOException {
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("update transaction : " + currentTransaction + ", row : " +
+                    PhoenixStorageHandlerUtil
+                            .toString(row));
+        }
+
+        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
+                .serialize(row, objInspector, DmlType.UPDATE);
+
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Data : " + pResultWritable.getValueList());
+        }
+
+        write(pResultWritable);
+    }
+
+    @Override
+    public void delete(long currentTransaction, Object row) throws IOException {
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("delete transaction : " + currentTransaction + ", row : " +
+                    PhoenixStorageHandlerUtil.toString(row));
+        }
+
+        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
+                .serialize(row, objInspector, DmlType.DELETE);
+
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Data : " + pResultWritable.getValueList());
+        }
+
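+        // Lazily prepare the DELETE statement on the first delete and reuse it afterwards.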
+        if (pstmtForDelete == null) {
+            try {
+                String deleteQuery = PhoenixUtil.constructDeleteStatement(conn, tableName);
+
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("Delete query : " + deleteQuery);
+                }
+
+                pstmtForDelete = conn.prepareStatement(deleteQuery);
+            } catch (SQLException e) {
+                throw new IOException(e);
+            }
+        }
+
+        delete(pResultWritable);
+
+        rowCountDelta--;
+    }
+
+    private void delete(PhoenixResultWritable pResultWritable) throws IOException {
+        try {
+            pResultWritable.delete(pstmtForDelete);
+            numRecords++;
+            pstmtForDelete.executeUpdate();
+
+            if (numRecords % batchSize == 0) {
+                LOG.debug("Commit called on a batch of size : " + batchSize);
+                conn.commit();
+            }
+        } catch (SQLException e) {
+            throw new IOException("Exception while deleting to table.", e);
+        }
+    }
+
+    @Override
+    public void flush() throws IOException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Flush called");
+        }
+
+        try {
+            conn.commit();
+
+            if (LOG.isInfoEnabled()) {
+                LOG.info("Written row : " + numRecords);
+            }
+        } catch (SQLException e) {
+            LOG.error("SQLException while performing the commit for the task.");
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public SerDeStats getStats() {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("getStats called");
+        }
+
+        SerDeStats stats = new SerDeStats();
+        stats.setRowCount(rowCountDelta);
+        // Don't worry about setting raw data size diff. There is no reasonable way to calculate
+        // that without finding the row we are updating or deleting, which would be a mess.
+        return stats;
+    }
+
+    @Override
+    public long getBufferedRowCount() {
+        return numRecords;
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixResultWritable.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixResultWritable.java
new file mode 100644
index 0000000..f322767
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixResultWritable.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.mapreduce;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.apache.phoenix.hive.PhoenixRowKey;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.hive.util.ColumnMappingUtils;
+import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
+import org.apache.phoenix.hive.util.PhoenixUtil;
+import org.apache.phoenix.util.ColumnInfo;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Writable row container used by the Phoenix SerDe for both reading and writing records.
+ *
+ */
+public class PhoenixResultWritable implements Writable, DBWritable, Configurable {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixResultWritable.class);
+
+    private List<ColumnInfo> columnMetadataList;
+    private List<Object> valueList;    // for output
+    private Map<String, Object> rowMap = new HashMap<>();  // for input
+    private Map<String, String> columnMap;
+
+    private int columnCount = -1;
+
+    private Configuration config;
+    private boolean isTransactional;
+    private Map<String, Object> rowKeyMap = new LinkedHashMap<>();
+    private List<String> primaryKeyColumnList;
+
+    public PhoenixResultWritable() {
+    }
+
+    public PhoenixResultWritable(Configuration config) throws IOException {
+        setConf(config);
+    }
+
+    public PhoenixResultWritable(Configuration config, List<ColumnInfo> columnMetadataList)
+            throws IOException {
+        this(config);
+        this.columnMetadataList = columnMetadataList;
+        valueList = new ArrayList<>(columnMetadataList.size());
+    }
+
+    @Override
+    public void write(DataOutput out) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void readFields(DataInput in) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    // for write
+    public void clear() {
+        valueList.clear();
+    }
+
+    // for write
+    public void add(Object value) {
+        valueList.add(value);
+    }
+
+    @Override
+    public void write(PreparedStatement statement) throws SQLException {
+        ColumnInfo columnInfo = null;
+        Object value = null;
+
+        try {
+            for (int i = 0, limit = columnMetadataList.size(); i < limit; i++) {
+                columnInfo = columnMetadataList.get(i);
+
+                if (valueList.size() > i) {
+                    value = valueList.get(i);
+                } else {
+                    value = null;
+                }
+
+                if (value == null) {
+                    statement.setNull(i + 1, columnInfo.getSqlType());
+                } else {
+                    statement.setObject(i + 1, value, columnInfo.getSqlType());
+                }
+            }
+        } catch (SQLException | RuntimeException e) {
+            LOG.error("[column-info, value] : " + columnInfo + ", " + value);
+            throw e;
+        }
+    }
+
+    public void delete(PreparedStatement statement) throws SQLException {
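+        // Binds only the first primaryKeyColumnList.size() values; this relies on the
+        // primary-key columns appearing first in columnMetadataList.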
+        ColumnInfo columnInfo = null;
+        Object value = null;
+
+        try {
+            for (int i = 0, limit = primaryKeyColumnList.size(); i < limit; i++) {
+                columnInfo = columnMetadataList.get(i);
+
+                if (valueList.size() > i) {
+                    value = valueList.get(i);
+                } else {
+                    value = null;
+                }
+
+                if (value == null) {
+                    statement.setNull(i + 1, columnInfo.getSqlType());
+                } else {
+                    statement.setObject(i + 1, value, columnInfo.getSqlType());
+                }
+            }
+        } catch (SQLException | RuntimeException e) {
+            LOG.error("[column-info, value] : " + columnInfo + ", " + value);
+            throw e;
+        }
+    }
+
+    @Override
+    public void readFields(ResultSet resultSet) throws SQLException {
+        ResultSetMetaData rsmd = resultSet.getMetaData();
+        if (columnCount == -1) {
+            this.columnCount = rsmd.getColumnCount();
+        }
+        rowMap.clear();
+
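+        // Translate Phoenix column names back to Hive column names via the reverse column mapping.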
+        for (int i = 0; i < columnCount; i++) {
+            Object value = resultSet.getObject(i + 1);
+            String columnName = rsmd.getColumnName(i + 1);
+            String mapName = columnMap.get(columnName);
+            if(mapName != null) {
+                columnName = mapName;
+            }
+            rowMap.put(columnName, value);
+        }
+
+        // Adding row__id column.
+        if (isTransactional) {
+            rowKeyMap.clear();
+
+            for (String pkColumn : primaryKeyColumnList) {
+                rowKeyMap.put(pkColumn, rowMap.get(pkColumn));
+            }
+        }
+    }
+
+    public void readPrimaryKey(PhoenixRowKey rowKey) {
+        rowKey.setRowKeyMap(rowKeyMap);
+    }
+
+    public List<ColumnInfo> getColumnMetadataList() {
+        return columnMetadataList;
+    }
+
+    public void setColumnMetadataList(List<ColumnInfo> columnMetadataList) {
+        this.columnMetadataList = columnMetadataList;
+    }
+
+    public Map<String, Object> getResultMap() {
+        return rowMap;
+    }
+
+    public List<Object> getValueList() {
+        return valueList;
+    }
+
+    @Override
+    public void setConf(Configuration conf) {
+        config = conf;
+        this.columnMap = ColumnMappingUtils.getReverseColumnMapping(config.get(PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING,""));
+
+        isTransactional = PhoenixStorageHandlerUtil.isTransactionalTable(config);
+
+        if (isTransactional) {
+            primaryKeyColumnList = PhoenixUtil.getPrimaryKeyColumnList(config,
+                    config.get(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME));
+        }
+    }
+
+    @Override
+    public Configuration getConf() {
+        return config;
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/AbstractPhoenixObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/AbstractPhoenixObjectInspector.java
new file mode 100644
index 0000000..1de1cc7
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/AbstractPhoenixObjectInspector.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.AbstractPrimitiveLazyObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.io.Writable;
+
+/**
+ * AbstractPhoenixObjectInspector for a LazyPrimitive object
+ */
+public abstract class AbstractPhoenixObjectInspector<T extends Writable>
+        extends AbstractPrimitiveLazyObjectInspector<T> {
+
+    private final Log log;
+
+    public AbstractPhoenixObjectInspector() {
+        super();
+
+        log = LogFactory.getLog(getClass());
+    }
+
+    protected AbstractPhoenixObjectInspector(PrimitiveTypeInfo typeInfo) {
+        super(typeInfo);
+
+        log = LogFactory.getLog(getClass());
+    }
+
+    @Override
+    public Object getPrimitiveJavaObject(Object o) {
+        return o == null ? null : o;
+    }
+
+    public void logExceptionMessage(Object value, String dataType) {
+        if (log.isDebugEnabled()) {
+            log.debug("Data not in the " + dataType + " data type range so converted to null. " +
+                    "Given data is :"
+                    + value.toString(), new Exception("For debugging purposes"));
+        }
+    }
+}
\ No newline at end of file
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixBinaryObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixBinaryObjectInspector.java
new file mode 100644
index 0000000..2c642d2
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixBinaryObjectInspector.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.io.BytesWritable;
+
+/**
+ * ObjectInspector for Binary type
+ */
+
+public class PhoenixBinaryObjectInspector extends AbstractPhoenixObjectInspector<BytesWritable>
+        implements BinaryObjectInspector {
+
+    public PhoenixBinaryObjectInspector() {
+        super(TypeInfoFactory.binaryTypeInfo);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        byte[] clone = null;
+
+        if (o != null) {
+            byte[] source = (byte[]) o;
+            clone = new byte[source.length];
+            System.arraycopy(source, 0, clone, 0, source.length);
+        }
+
+        return clone;
+    }
+
+    @Override
+    public byte[] getPrimitiveJavaObject(Object o) {
+        return (byte[]) o;
+    }
+
+    @Override
+    public BytesWritable getPrimitiveWritableObject(Object o) {
+        return new BytesWritable((byte[]) o);
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixBooleanObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixBooleanObjectInspector.java
new file mode 100644
index 0000000..a767ca0
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixBooleanObjectInspector.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.io.BooleanWritable;
+
+public class PhoenixBooleanObjectInspector extends AbstractPhoenixObjectInspector<BooleanWritable>
+        implements BooleanObjectInspector {
+
+    public PhoenixBooleanObjectInspector() {
+        super(TypeInfoFactory.booleanTypeInfo);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : Boolean.valueOf((Boolean) o);
+    }
+
+    @Override
+    public BooleanWritable getPrimitiveWritableObject(Object o) {
+        return new BooleanWritable(get(o));
+    }
+
+    @Override
+    public boolean get(Object o) {
+        Boolean value = null;
+
+        if (o != null) {
+            try {
+                value = (Boolean) o;
+            } catch (Exception e) {
+                logExceptionMessage(o, "BOOLEAN");
+            }
+        }
+
+        return value;
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixByteObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixByteObjectInspector.java
new file mode 100644
index 0000000..6972238
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixByteObjectInspector.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.io.ByteWritable;
+
+/**
+ * ObjectInspector for byte type
+ */
+public class PhoenixByteObjectInspector extends AbstractPhoenixObjectInspector<ByteWritable>
+        implements ByteObjectInspector {
+
+    public PhoenixByteObjectInspector() {
+        super(TypeInfoFactory.byteTypeInfo);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : Byte.valueOf((Byte) o);
+    }
+
+    @Override
+    public ByteWritable getPrimitiveWritableObject(Object o) {
+        return new ByteWritable(get(o));
+    }
+
+    @Override
+    public byte get(Object o) {
+        Byte value = null;
+
+        if (o != null) {
+            try {
+                value = (Byte) o;
+            } catch (Exception e) {
+                logExceptionMessage(o, "BYTE");
+            }
+        }
+
+        return value;
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixCharObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixCharObjectInspector.java
new file mode 100644
index 0000000..17222a2
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixCharObjectInspector.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.common.type.HiveChar;
+import org.apache.hadoop.hive.serde2.io.HiveCharWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveCharObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+/**
+ * ObjectInspector for char type
+ */
+public class PhoenixCharObjectInspector extends AbstractPhoenixObjectInspector<HiveCharWritable>
+        implements HiveCharObjectInspector {
+
+    public PhoenixCharObjectInspector() {
+        this(TypeInfoFactory.charTypeInfo);
+    }
+
+    public PhoenixCharObjectInspector(PrimitiveTypeInfo type) {
+        super(type);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : new String((String) o);
+    }
+
+    @Override
+    public HiveCharWritable getPrimitiveWritableObject(Object o) {
+        return new HiveCharWritable(getPrimitiveJavaObject(o));
+    }
+
+    @Override
+    public HiveChar getPrimitiveJavaObject(Object o) {
+        if (o == null) {
+            return null;
+        }
+        String value = (String) o;
+        return new HiveChar(value, value.length());
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDateObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDateObjectInspector.java
new file mode 100644
index 0000000..7702c64
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDateObjectInspector.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.common.type.Date;
+import org.apache.hadoop.hive.serde2.io.DateWritableV2;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+/**
+ * ObjectInspector for date type
+ */
+
+public class PhoenixDateObjectInspector extends AbstractPhoenixObjectInspector<DateWritableV2>
+        implements DateObjectInspector {
+
+    public PhoenixDateObjectInspector() {
+        super(TypeInfoFactory.dateTypeInfo);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : java.sql.Date.valueOf(o.toString());
+    }
+
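+    // Phoenix hands back java.sql.Date values, while Hive 3 expects its own
+    // org.apache.hadoop.hive.common.type.Date, so the value is converted via its string form.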
+    @Override
+    public Date getPrimitiveJavaObject(Object o) {
+        if (o == null) {
+            return null;
+        }
+        return Date.valueOf(((java.sql.Date) o).toString());
+    }
+
+    @Override
+    public DateWritableV2 getPrimitiveWritableObject(Object o) {
+        DateWritableV2 value = null;
+
+        if (o != null) {
+            try {
+                value = new DateWritableV2(getPrimitiveJavaObject(o));
+            } catch (Exception e) {
+                logExceptionMessage(o, "DATE");
+                value = new DateWritableV2();
+            }
+        }
+
+        return value;
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDecimalObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDecimalObjectInspector.java
new file mode 100644
index 0000000..116dc08
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDecimalObjectInspector.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.HiveDecimalUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+import java.math.BigDecimal;
+
+public class PhoenixDecimalObjectInspector extends
+        AbstractPhoenixObjectInspector<HiveDecimalWritable>
+        implements HiveDecimalObjectInspector {
+
+    public PhoenixDecimalObjectInspector() {
+        this(TypeInfoFactory.decimalTypeInfo);
+    }
+
+    public PhoenixDecimalObjectInspector(PrimitiveTypeInfo typeInfo) {
+        super(typeInfo);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : new BigDecimal(o.toString());
+    }
+
+    @Override
+    public HiveDecimal getPrimitiveJavaObject(Object o) {
+        if (o == null) {
+            return null;
+        }
+
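+        // Enforce the declared precision/scale so the value matches the Hive DECIMAL column definition.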
+        return HiveDecimalUtils.enforcePrecisionScale(HiveDecimal.create((BigDecimal) o),(DecimalTypeInfo)typeInfo);
+    }
+
+    @Override
+    public HiveDecimalWritable getPrimitiveWritableObject(Object o) {
+        HiveDecimalWritable value = null;
+
+        if (o != null) {
+            try {
+                value = new HiveDecimalWritable(getPrimitiveJavaObject(o));
+            } catch (Exception e) {
+                logExceptionMessage(o, "DECIMAL");
+            }
+        }
+
+        return value;
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDoubleObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDoubleObjectInspector.java
new file mode 100644
index 0000000..bd1c2e2
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDoubleObjectInspector.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.io.DoubleWritable;
+
+/**
+ * ObjectInspector for double type
+ */
+public class PhoenixDoubleObjectInspector extends AbstractPhoenixObjectInspector<DoubleWritable>
+        implements DoubleObjectInspector {
+
+    public PhoenixDoubleObjectInspector() {
+        super(TypeInfoFactory.doubleTypeInfo);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : Double.valueOf((Double) o);
+    }
+
+    @Override
+    public DoubleWritable getPrimitiveWritableObject(Object o) {
+        return new DoubleWritable(get(o));
+    }
+
+    @Override
+    public double get(Object o) {
+        Double value = null;
+
+        if (o != null) {
+            try {
+                value = ((Double) o).doubleValue();
+            } catch (Exception e) {
+                logExceptionMessage(o, "LONG");
+            }
+        }
+
+        return value;
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixFloatObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixFloatObjectInspector.java
new file mode 100644
index 0000000..bf1badc
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixFloatObjectInspector.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.FloatObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.io.FloatWritable;
+
+/**
+ * ObjectInspector for float type
+ */
+
+public class PhoenixFloatObjectInspector extends AbstractPhoenixObjectInspector<FloatWritable>
+        implements FloatObjectInspector {
+
+    public PhoenixFloatObjectInspector() {
+        super(TypeInfoFactory.floatTypeInfo);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : Float.valueOf((Float) o);
+    }
+
+    @Override
+    public FloatWritable getPrimitiveWritableObject(Object o) {
+        return new FloatWritable(get(o));
+    }
+
+    @Override
+    public float get(Object o) {
+        Float value = null;
+
+        if (o != null) {
+            try {
+                value = ((Float) o).floatValue();
+            } catch (Exception e) {
+                logExceptionMessage(o, "LONG");
+            }
+        }
+
+        return value;
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixIntObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixIntObjectInspector.java
new file mode 100644
index 0000000..a0d4387
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixIntObjectInspector.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.io.IntWritable;
+
+public class PhoenixIntObjectInspector extends AbstractPhoenixObjectInspector<IntWritable>
+        implements IntObjectInspector {
+
+    public PhoenixIntObjectInspector() {
+        super(TypeInfoFactory.intTypeInfo);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : Integer.valueOf((Integer) o);
+    }
+
+    @Override
+    public Category getCategory() {
+        return Category.PRIMITIVE;
+    }
+
+    @Override
+    public IntWritable getPrimitiveWritableObject(Object o) {
+        return new IntWritable(get(o));
+    }
+
+    @Override
+    public int get(Object o) {
+        Integer value = null;
+
+        if (o != null) {
+            try {
+                value = ((Integer) o).intValue();
+            } catch (Exception e) {
+                logExceptionMessage(o, "INT");
+            }
+        }
+
+        return value;
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixListObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixListObjectInspector.java
new file mode 100644
index 0000000..07cee37
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixListObjectInspector.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyObjectInspectorParameters;
+import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.phoenix.schema.types.PhoenixArray;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * ObjectInspector for list objects.
+ */
+public class PhoenixListObjectInspector implements ListObjectInspector {
+
+    private ObjectInspector listElementObjectInspector;
+    private byte separator;
+    private LazyObjectInspectorParameters lazyParams;
+
+    public PhoenixListObjectInspector(ObjectInspector listElementObjectInspector,
+                                      byte separator, LazyObjectInspectorParameters lazyParams) {
+        this.listElementObjectInspector = listElementObjectInspector;
+        this.separator = separator;
+        this.lazyParams = lazyParams;
+    }
+
+    @Override
+    public String getTypeName() {
+        return org.apache.hadoop.hive.serde.serdeConstants.LIST_TYPE_NAME + "<" +
+                listElementObjectInspector.getTypeName() + ">";
+    }
+
+    @Override
+    public Category getCategory() {
+        return Category.LIST;
+    }
+
+    @Override
+    public ObjectInspector getListElementObjectInspector() {
+        return listElementObjectInspector;
+    }
+
+    @Override
+    public Object getListElement(Object data, int index) {
+        if (data == null) {
+            return null;
+        }
+
+        PhoenixArray array = (PhoenixArray) data;
+
+        return array.getElement(index);
+    }
+
+    @Override
+    public int getListLength(Object data) {
+        if (data == null) {
+            return -1;
+        }
+
+        PhoenixArray array = (PhoenixArray) data;
+        return array.getDimensions();
+    }
+
+    @Override
+    public List<?> getList(Object data) {
+        if (data == null) {
+            return null;
+        }
+
+        PhoenixArray array = (PhoenixArray) data;
+        int valueLength = array.getDimensions();
+        List<Object> valueList = new ArrayList<>(valueLength);
+
+        for (int i = 0; i < valueLength; i++) {
+            valueList.add(array.getElement(i));
+        }
+
+        return valueList;
+    }
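+
+    // Illustrative example: for a Phoenix INTEGER ARRAY value such as {1, 2, 3},
+    // getListLength returns 3 and getList returns an ArrayList containing 1, 2 and 3
+    // in order (getDimensions() of PhoenixArray is used here as the element count).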
+
+    public byte getSeparator() {
+        return separator;
+    }
+
+    public LazyObjectInspectorParameters getLazyParams() {
+        return lazyParams;
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixLongObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixLongObjectInspector.java
new file mode 100644
index 0000000..554f2a4
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixLongObjectInspector.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.io.LongWritable;
+
+public class PhoenixLongObjectInspector extends AbstractPhoenixObjectInspector<LongWritable>
+        implements LongObjectInspector {
+
+    public PhoenixLongObjectInspector() {
+        super(TypeInfoFactory.longTypeInfo);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : Long.valueOf((Long) o);
+    }
+
+    @Override
+    public LongWritable getPrimitiveWritableObject(Object o) {
+        return new LongWritable(get(o));
+    }
+
+    @Override
+    public long get(Object o) {
+        Long value = null;
+
+        if (o != null) {
+            try {
+                value = ((Long) o).longValue();
+            } catch (Exception e) {
+                logExceptionMessage(o, "LONG");
+            }
+        }
+
+        return value;
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixObjectInspectorFactory.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixObjectInspectorFactory.java
new file mode 100644
index 0000000..3a19ea7
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixObjectInspectorFactory.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.ObjectInspectorOptions;
+import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Factory for object inspectors. Matches hive type to the corresponding Phoenix object inspector.
+ */
+
+public class PhoenixObjectInspectorFactory {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixObjectInspectorFactory.class);
+
+    private PhoenixObjectInspectorFactory() {
+
+    }
+
+    public static LazySimpleStructObjectInspector createStructObjectInspector(TypeInfo type,
+                                                                              LazySerDeParameters
+                                                                                      serdeParams) {
+        StructTypeInfo structTypeInfo = (StructTypeInfo) type;
+        List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
+        List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
+        List<ObjectInspector> fieldObjectInspectors = new ArrayList<ObjectInspector>
+                (fieldTypeInfos.size());
+
+        for (int i = 0; i < fieldTypeInfos.size(); i++) {
+            fieldObjectInspectors.add(createObjectInspector(fieldTypeInfos.get(i), serdeParams));
+        }
+
+        return LazyObjectInspectorFactory.getLazySimpleStructObjectInspector(
+                fieldNames, fieldObjectInspectors, null,
+                serdeParams.getSeparators()[1],
+                serdeParams, ObjectInspectorOptions.JAVA);
+    }
+
+    public static ObjectInspector createObjectInspector(TypeInfo type, LazySerDeParameters
+            serdeParams) {
+        ObjectInspector oi = null;
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Type : " + type);
+        }
+
+        switch (type.getCategory()) {
+            case PRIMITIVE:
+                switch (((PrimitiveTypeInfo) type).getPrimitiveCategory()) {
+                    case BOOLEAN:
+                        oi = new PhoenixBooleanObjectInspector();
+                        break;
+                    case BYTE:
+                        oi = new PhoenixByteObjectInspector();
+                        break;
+                    case SHORT:
+                        oi = new PhoenixShortObjectInspector();
+                        break;
+                    case INT:
+                        oi = new PhoenixIntObjectInspector();
+                        break;
+                    case LONG:
+                        oi = new PhoenixLongObjectInspector();
+                        break;
+                    case FLOAT:
+                        oi = new PhoenixFloatObjectInspector();
+                        break;
+                    case DOUBLE:
+                        oi = new PhoenixDoubleObjectInspector();
+                        break;
+                    case VARCHAR:
+                        // VARCHAR is handled the same way as STRING
+                    case STRING:
+                        oi = new PhoenixStringObjectInspector(serdeParams.isEscaped(),
+                                serdeParams.getEscapeChar());
+                        break;
+                    case CHAR:
+                        oi = new PhoenixCharObjectInspector((PrimitiveTypeInfo)type);
+                        break;
+                    case DATE:
+                        oi = new PhoenixDateObjectInspector();
+                        break;
+                    case TIMESTAMP:
+                        oi = new PhoenixTimestampObjectInspector();
+                        break;
+                    case DECIMAL:
+                        oi = new PhoenixDecimalObjectInspector((PrimitiveTypeInfo) type);
+                        break;
+                    case BINARY:
+                        oi = new PhoenixBinaryObjectInspector();
+                        break;
+                    default:
+                        throw new RuntimeException("Hive internal error. not supported data type " +
+                                ": " + type);
+                }
+
+                break;
+            case LIST:
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("List type started");
+                }
+
+                ObjectInspector listElementObjectInspector = createObjectInspector((
+                        (ListTypeInfo) type).getListElementTypeInfo(), serdeParams);
+
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("List type ended");
+                }
+
+                oi = new PhoenixListObjectInspector(listElementObjectInspector, serdeParams
+                        .getSeparators()[0], serdeParams);
+
+                break;
+            default:
+                throw new RuntimeException("Hive internal error. not supported data type : " +
+                        type);
+        }
+
+        return oi;
+    }
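+
+    // Illustrative usage sketch. Assumes a LazySerDeParameters instance "serdeParams"
+    // obtained during SerDe initialization; the type-string helper below is Hive's
+    // standard org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.
+    //
+    //   TypeInfo listType = TypeInfoUtils.getTypeInfoFromTypeString("array<int>");
+    //   ObjectInspector oi = PhoenixObjectInspectorFactory.createObjectInspector(listType, serdeParams);
+    //   // oi is a PhoenixListObjectInspector whose element inspector is a PhoenixIntObjectInspector.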
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixShortObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixShortObjectInspector.java
new file mode 100644
index 0000000..84529b0
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixShortObjectInspector.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+public class PhoenixShortObjectInspector extends AbstractPhoenixObjectInspector<ShortWritable>
+        implements ShortObjectInspector {
+
+    public PhoenixShortObjectInspector() {
+        super(TypeInfoFactory.shortTypeInfo);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : Short.valueOf((Short) o);
+    }
+
+    @Override
+    public ShortWritable getPrimitiveWritableObject(Object o) {
+        return new ShortWritable(get(o));
+    }
+
+    @Override
+    public short get(Object o) {
+        Short value = null;
+
+        if (o != null) {
+            try {
+                value = ((Short) o).shortValue();
+            } catch (Exception e) {
+                logExceptionMessage(o, "SHORT");
+            }
+        }
+
+        return value;
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixStringObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixStringObjectInspector.java
new file mode 100644
index 0000000..e409e1d
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixStringObjectInspector.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.io.Text;
+
+/**
+ * ObjectInspector for string type
+ */
+public class PhoenixStringObjectInspector extends AbstractPhoenixObjectInspector<Text>
+        implements StringObjectInspector {
+
+    private boolean escaped;
+    private byte escapeChar;
+
+    public PhoenixStringObjectInspector(boolean escaped, byte escapeChar) {
+        super(TypeInfoFactory.stringTypeInfo);
+        this.escaped = escaped;
+        this.escapeChar = escapeChar;
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : new String((String) o);
+    }
+
+    @Override
+    public String getPrimitiveJavaObject(Object o) {
+        return (String) o;
+    }
+
+    @Override
+    public Text getPrimitiveWritableObject(Object o) {
+        Text value = null;
+
+        if (o != null) {
+            try {
+                value = new Text((String) o);
+            } catch (Exception e) {
+                logExceptionMessage(o, "STRING");
+            }
+        }
+
+        return value;
+    }
+
+    public boolean isEscaped() {
+        return escaped;
+    }
+
+    public byte getEscapeChar() {
+        return escapeChar;
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixTimestampObjectInspector.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixTimestampObjectInspector.java
new file mode 100644
index 0000000..99ad0cc
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixTimestampObjectInspector.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.objectinspector;
+
+import org.apache.hadoop.hive.common.type.Timestamp;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableV2;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+/**
+ * ObjectInspector for timestamp type
+ */
+public class PhoenixTimestampObjectInspector extends
+        AbstractPhoenixObjectInspector<TimestampWritableV2>
+        implements TimestampObjectInspector {
+
+    public PhoenixTimestampObjectInspector() {
+        super(TypeInfoFactory.timestampTypeInfo);
+    }
+
+    @Override
+    public Object copyObject(Object o) {
+        return o == null ? null : java.sql.Timestamp.valueOf(o.toString());
+    }
+
+    @Override
+    public Timestamp getPrimitiveJavaObject(Object o) {
+        if (o == null) {
+            return null;
+        }
+        return Timestamp.valueOf(((java.sql.Timestamp) o).toString());
+    }
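+
+    // Note: the round trip through toString() converts the java.sql.Timestamp returned by
+    // Phoenix into Hive's own Timestamp type while keeping the fractional-second part.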
+
+    @Override
+    public TimestampWritableV2 getPrimitiveWritableObject(Object o) {
+        TimestampWritableV2 value = null;
+
+        if (o != null) {
+            try {
+                value = new TimestampWritableV2(getPrimitiveJavaObject(o));
+            } catch (Exception e) {
+                logExceptionMessage(o, "TIMESTAMP");
+            }
+        }
+
+        return value;
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ppd/PhoenixPredicateDecomposer.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ppd/PhoenixPredicateDecomposer.java
new file mode 100644
index 0000000..1e65819
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ppd/PhoenixPredicateDecomposer.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.ppd;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler.DecomposedPredicate;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.phoenix.hive.ql.index.IndexPredicateAnalyzer;
+import org.apache.phoenix.hive.ql.index.IndexSearchCondition;
+import org.apache.phoenix.hive.ql.index.PredicateAnalyzerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Supporting class that generates a DecomposedPredicate for the Phoenix storage handler
+ * based on the search conditions.
+ */
+public class PhoenixPredicateDecomposer {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixPredicateDecomposer.class);
+
+    private List<String> columnNameList;
+    private boolean calledPPD;
+
+    private List<IndexSearchCondition> searchConditionList;
+
+    public static PhoenixPredicateDecomposer create(List<String> columnNameList) {
+        return new PhoenixPredicateDecomposer(columnNameList);
+    }
+
+    private PhoenixPredicateDecomposer(List<String> columnNameList) {
+        this.columnNameList = columnNameList;
+    }
+
+    public DecomposedPredicate decomposePredicate(ExprNodeDesc predicate) {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("predicate - " + predicate.toString());
+        }
+
+        IndexPredicateAnalyzer analyzer = PredicateAnalyzerFactory.createPredicateAnalyzer
+                (columnNameList, getFieldValidator());
+        DecomposedPredicate decomposed = new DecomposedPredicate();
+
+        List<IndexSearchCondition> conditions = new ArrayList<IndexSearchCondition>();
+        decomposed.residualPredicate = (ExprNodeGenericFuncDesc) analyzer.analyzePredicate
+                (predicate, conditions);
+        if (!conditions.isEmpty()) {
+            decomposed.pushedPredicate = analyzer.translateSearchConditions(conditions);
+            try {
+                searchConditionList = conditions;
+                calledPPD = true;
+            } catch (Exception e) {
+                LOG.warn("Failed to decompose predicates", e);
+                return null;
+            }
+        }
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("decomposed predicate - residualPredicate: " + decomposed.residualPredicate +
+            ", pushedPredicate: " + decomposed.pushedPredicate);
+        }
+
+        return decomposed;
+    }
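+
+    // Illustrative usage sketch (hypothetical column list and predicate; the storage handler
+    // supplies the real ones when predicate push-down is negotiated):
+    //
+    //   PhoenixPredicateDecomposer decomposer =
+    //           PhoenixPredicateDecomposer.create(Arrays.asList("id", "name"));
+    //   DecomposedPredicate dp = decomposer.decomposePredicate(predicateExpr);
+    //   // dp.pushedPredicate  - conditions Phoenix can evaluate (e.g. id = 10)
+    //   // dp.residualPredicate - what Hive must still evaluate after the scan
+    //   List<IndexSearchCondition> conditions = decomposer.getSearchConditionList();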
+
+    public List<IndexSearchCondition> getSearchConditionList() {
+        return searchConditionList;
+    }
+
+    public boolean isCalledPPD() {
+        return calledPPD;
+    }
+
+    protected IndexPredicateAnalyzer.FieldValidator getFieldValidator() {
+        return null;
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java
new file mode 100644
index 0000000..4e77078
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java
@@ -0,0 +1,526 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.ql.index;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
+import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
+import org.apache.hadoop.hive.ql.lib.Dispatcher;
+import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.Node;
+import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
+import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNot;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToBinary;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToChar;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDate;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDecimal;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUnixTimeStamp;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUtcTimestamp;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToVarchar;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.phoenix.hive.util.TypeInfoUtils;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Stack;
+
+/**
+ * Clone of org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer with a modified
+ * analyzePredicate method.
+ */
+public class IndexPredicateAnalyzer {
+
+    private static final Log LOG = LogFactory.getLog(IndexPredicateAnalyzer.class);
+
+    private final Set<String> udfNames;
+    private final Map<String, Set<String>> columnToUDFs;
+    private FieldValidator fieldValidator;
+
+    private boolean acceptsFields;
+
+    public IndexPredicateAnalyzer() {
+        udfNames = new HashSet<String>();
+        columnToUDFs = new HashMap<String, Set<String>>();
+    }
+
+    public void setFieldValidator(FieldValidator fieldValidator) {
+        this.fieldValidator = fieldValidator;
+    }
+
+    /**
+     * Registers a comparison operator as one which can be satisfied by an index
+     * search. Unless this is called, analyzePredicate will never find any
+     * indexable conditions.
+     *
+     * @param udfName name of comparison operator as returned by either
+     *                {@link GenericUDFBridge#getUdfName} (for simple UDFs) or
+     *                udf.getClass().getName() (for generic UDFs).
+     */
+    public void addComparisonOp(String udfName) {
+        udfNames.add(udfName);
+    }
+
+    /**
+     * Clears the set of column names allowed in comparisons. (Initially, all
+     * column names are allowed.)
+     */
+    public void clearAllowedColumnNames() {
+        columnToUDFs.clear();
+    }
+
+    /**
+     * Adds a column name to the set of column names allowed.
+     *
+     * @param columnName name of column to be allowed
+     */
+    public void allowColumnName(String columnName) {
+        columnToUDFs.put(columnName, udfNames);
+    }
+
+    /**
+     * Adds the allowed comparison functions for the given column.
+     *
+     * @param columnName name of the column
+     * @param udfs       comparison UDF names allowed for that column
+     */
+    public void addComparisonOp(String columnName, String... udfs) {
+        Set<String> allowed = columnToUDFs.get(columnName);
+        if (allowed == null || allowed == udfNames) {
+            // override
+            columnToUDFs.put(columnName, new HashSet<String>(Arrays.asList(udfs)));
+        } else {
+            allowed.addAll(Arrays.asList(udfs));
+        }
+    }
+
+    /**
+     * Analyzes a predicate.
+     *
+     * @param predicate        predicate to be analyzed
+     * @param searchConditions receives conditions produced by analysis
+     * @return residual predicate which could not be translated to
+     * searchConditions
+     */
+    public ExprNodeDesc analyzePredicate(ExprNodeDesc predicate, final List<IndexSearchCondition>
+            searchConditions) {
+
+        Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+        NodeProcessor nodeProcessor = new NodeProcessor() {
+            @Override
+            public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object...
+                    nodeOutputs) throws SemanticException {
+
+                // We can only push down stuff which appears as part of
+                // a pure conjunction: reject OR, CASE, etc.
+                for (Node ancestor : stack) {
+                    if (nd == ancestor) {
+                        break;
+                    }
+                    if (!FunctionRegistry.isOpAnd((ExprNodeDesc) ancestor)) {
+                        return nd;
+                    }
+                }
+
+                return analyzeExpr((ExprNodeGenericFuncDesc) nd, searchConditions, nodeOutputs);
+            }
+        };
+
+        Dispatcher disp = new DefaultRuleDispatcher(nodeProcessor, opRules, null);
+        GraphWalker ogw = new DefaultGraphWalker(disp);
+        ArrayList<Node> topNodes = new ArrayList<Node>();
+        topNodes.add(predicate);
+        HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
+
+        try {
+            ogw.startWalking(topNodes, nodeOutput);
+        } catch (SemanticException ex) {
+            throw new RuntimeException(ex);
+        }
+
+        ExprNodeDesc residualPredicate = (ExprNodeDesc) nodeOutput.get(predicate);
+        return residualPredicate;
+    }
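+
+    // Illustrative sketch: for a predicate such as (id = 10 AND name LIKE 'a%'), only the
+    // registered column-to-constant comparison (id = 10) becomes an IndexSearchCondition;
+    // the LIKE conjunct is returned as the residual predicate for Hive to evaluate.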
+
+    // Check if ExprNodeColumnDesc is wrapped in expr.
+    // If so, peel off. Otherwise return itself.
+    private ExprNodeDesc getColumnExpr(ExprNodeDesc expr) {
+        if (expr instanceof ExprNodeColumnDesc) {
+            return expr;
+        }
+        ExprNodeGenericFuncDesc funcDesc = null;
+        if (expr instanceof ExprNodeGenericFuncDesc) {
+            funcDesc = (ExprNodeGenericFuncDesc) expr;
+        }
+        if (null == funcDesc) {
+            return expr;
+        }
+        GenericUDF udf = funcDesc.getGenericUDF();
+        // check if its a simple cast expression.
+        if ((udf instanceof GenericUDFBridge || udf instanceof GenericUDFToBinary || udf
+                instanceof GenericUDFToChar
+                || udf instanceof GenericUDFToVarchar || udf instanceof GenericUDFToDecimal
+                || udf instanceof GenericUDFToDate || udf instanceof GenericUDFToUnixTimeStamp
+                || udf instanceof GenericUDFToUtcTimestamp) && funcDesc.getChildren().size() == 1
+                && funcDesc.getChildren().get(0) instanceof ExprNodeColumnDesc) {
+            return expr.getChildren().get(0);
+        }
+        return expr;
+    }
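+
+    // Example of the peeling above: for CAST(id AS varchar) = 'x' the cast wrapper is removed
+    // so that the underlying column reference "id" can be matched against the allowed columns.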
+
+    private void processingBetweenOperator(ExprNodeGenericFuncDesc expr,
+                                           List<IndexSearchCondition> searchConditions, Object...
+                                                   nodeOutputs) {
+        String[] fields = null;
+
+        final boolean isNot = (Boolean) ((ExprNodeConstantDesc) nodeOutputs[0]).getValue();
+        ExprNodeDesc columnNodeDesc = (ExprNodeDesc) nodeOutputs[1];
+
+        if (columnNodeDesc instanceof ExprNodeFieldDesc) {
+            // rowKey field
+            ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) columnNodeDesc;
+            fields = ExprNodeDescUtils.extractFields(fieldDesc);
+
+            ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair((ExprNodeDesc)
+                    nodeOutputs[1], (ExprNodeDesc) nodeOutputs[2]);
+            columnNodeDesc = extracted[0];
+        }
+        addSearchConditionIfPossible(expr, searchConditions, fields, isNot, columnNodeDesc,
+                Arrays.copyOfRange(nodeOutputs, 2, nodeOutputs.length));
+    }
+
+    private void addSearchConditionIfPossible(ExprNodeGenericFuncDesc expr,
+                                              List<IndexSearchCondition> searchConditions,
+                                              String[] fields,
+                                              boolean isNot,
+                                              ExprNodeDesc columnNodeDesc,
+                                              Object[] nodeOutputs) {
+        ExprNodeColumnDesc columnDesc;
+        columnNodeDesc = getColumnExpr(columnNodeDesc);
+        if (!(columnNodeDesc instanceof ExprNodeColumnDesc)) {
+            return;
+        }
+        columnDesc = (ExprNodeColumnDesc) columnNodeDesc;
+
+        String udfName = expr.getGenericUDF().getUdfName();
+        ExprNodeConstantDesc[] constantDescs = null;
+        if (nodeOutputs != null) {
+            constantDescs = extractConstants(columnDesc, nodeOutputs);
+            if (constantDescs == null) {
+                return;
+            }
+        }
+
+        searchConditions.add(new IndexSearchCondition(columnDesc, udfName, constantDescs,
+                expr, fields, isNot));
+    }
+
+    private boolean isAcceptableConstants(ExprNodeDesc columnDesc, ExprNodeDesc constant) {
+        // from(constant) -> to(columnDesc)
+        return TypeInfoUtils.implicitConvertible(constant.getTypeInfo(), columnDesc.getTypeInfo());
+    }
+
+    private ExprNodeConstantDesc[] extractConstants(ExprNodeColumnDesc columnDesc, Object... nodeOutputs) {
+        ExprNodeConstantDesc[] constantDescs = new ExprNodeConstantDesc[nodeOutputs.length];
+        for (int i = 0; i < nodeOutputs.length; i++) {
+            ExprNodeDesc[] extracted =
+                    ExprNodeDescUtils.extractComparePair(columnDesc, (ExprNodeDesc) nodeOutputs[i]);
+            if (extracted == null || !isAcceptableConstants(columnDesc, extracted[1])) {
+                return null;
+            }
+            constantDescs[i] = (ExprNodeConstantDesc) extracted[1];
+        }
+
+        return constantDescs;
+    }
+
+    private void processingInOperator(ExprNodeGenericFuncDesc expr, List<IndexSearchCondition>
+            searchConditions, boolean isNot, Object... nodeOutputs) {
+        ExprNodeDesc columnDesc;
+        String[] fields = null;
+
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Processing In Operator. nodeOutputs : " + new ArrayList<>(Arrays.asList(nodeOutputs)));
+        }
+
+        columnDesc = (ExprNodeDesc) nodeOutputs[0];
+        if (columnDesc instanceof ExprNodeFieldDesc) {
+            // rowKey field
+            ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) columnDesc;
+            fields = ExprNodeDescUtils.extractFields(fieldDesc);
+
+            ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair((ExprNodeDesc)
+                    nodeOutputs[0], (ExprNodeDesc) nodeOutputs[1]);
+
+            if (extracted == null) {    // adding for tez
+                return;
+            }
+
+            if (LOG.isTraceEnabled()) {
+                LOG.trace("nodeOutputs[0] : " + nodeOutputs[0] + ", nodeOutputs[1] : " +
+                        nodeOutputs[1] + " => " + new ArrayList<>(Arrays.asList(extracted)));
+            }
+
+            columnDesc = extracted[0];
+        }
+
+        addSearchConditionIfPossible(expr, searchConditions, fields, isNot, columnDesc,
+                Arrays.copyOfRange(nodeOutputs, 1, nodeOutputs.length));
+    }
+
+    private void processingNullOperator(ExprNodeGenericFuncDesc expr, List<IndexSearchCondition>
+            searchConditions, Object... nodeOutputs) {
+        ExprNodeDesc columnDesc = null;
+        String[] fields = null;
+
+        columnDesc = (ExprNodeDesc) nodeOutputs[0];
+        if (columnDesc instanceof ExprNodeFieldDesc) {
+            // rowKey field
+            ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) columnDesc;
+            fields = ExprNodeDescUtils.extractFields(fieldDesc);
+
+            ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair(columnDesc,
+                    new ExprNodeConstantDesc());
+            columnDesc = extracted[0];
+        }
+
+        addSearchConditionIfPossible(expr, searchConditions, fields, false, columnDesc, null);
+    }
+
+    private void processingNotNullOperator(ExprNodeGenericFuncDesc expr,
+                                           List<IndexSearchCondition> searchConditions, Object...
+                                                   nodeOutputs) {
+        ExprNodeDesc columnDesc;
+        String[] fields = null;
+
+        columnDesc = (ExprNodeDesc) nodeOutputs[0];
+        if (columnDesc instanceof ExprNodeFieldDesc) {
+            // rowKey field
+            ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) columnDesc;
+            fields = ExprNodeDescUtils.extractFields(fieldDesc);
+
+            ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair(columnDesc,
+                    new ExprNodeConstantDesc());
+            columnDesc = extracted[0];
+        }
+
+        addSearchConditionIfPossible(expr, searchConditions, fields, true, columnDesc, null);
+    }
+
+    private ExprNodeDesc analyzeExpr(ExprNodeGenericFuncDesc expr, List<IndexSearchCondition>
+            searchConditions, Object... nodeOutputs) throws SemanticException {
+
+        if (FunctionRegistry.isOpAnd(expr)) {
+            assert (nodeOutputs.length == 2);
+            ExprNodeDesc residual1 = (ExprNodeDesc)nodeOutputs[0];
+            ExprNodeDesc residual2 = (ExprNodeDesc)nodeOutputs[1];
+            if (residual1 == null) { return residual2; }
+            if (residual2 == null) { return residual1; }
+            List<ExprNodeDesc> residuals = new ArrayList<ExprNodeDesc>();
+            residuals.add(residual1);
+            residuals.add(residual2);
+            return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, FunctionRegistry
+                    .getGenericUDFForAnd(), residuals);
+        }
+
+        GenericUDF genericUDF = expr.getGenericUDF();
+        if (!(genericUDF instanceof GenericUDFBaseCompare)) {
+            // 2015-10-22 Added by JeongMin Ju : Processing Between/In Operator
+            if (genericUDF instanceof GenericUDFBetween) {
+                // For NOT BETWEEN, the first element of nodeOutputs is true; otherwise it is false.
+                processingBetweenOperator(expr, searchConditions, nodeOutputs);
+                return expr;
+            } else if (genericUDF instanceof GenericUDFIn) {
+                // Plain IN operator.
+                processingInOperator(expr, searchConditions, false, nodeOutputs);
+                return expr;
+            } else if (genericUDF instanceof GenericUDFOPNot &&
+                    ((ExprNodeGenericFuncDesc) expr.getChildren().get(0)).getGenericUDF()
+                            instanceof GenericUDFIn) {
+                // For NOT IN, the IN operator appears as a child of the NOT operator.
+                processingInOperator((ExprNodeGenericFuncDesc) expr.getChildren().get(0),
+                        searchConditions, true, ((ExprNodeGenericFuncDesc) nodeOutputs[0])
+                                .getChildren().toArray());
+                return expr;
+            } else if (genericUDF instanceof GenericUDFOPNull) {
+                processingNullOperator(expr, searchConditions, nodeOutputs);
+                return expr;
+            } else if (genericUDF instanceof GenericUDFOPNotNull) {
+                processingNotNullOperator(expr, searchConditions, nodeOutputs);
+                return expr;
+            } else {
+                return expr;
+            }
+        }
+        ExprNodeDesc expr1 = (ExprNodeDesc) nodeOutputs[0];
+        ExprNodeDesc expr2 = (ExprNodeDesc) nodeOutputs[1];
+        // We may need to peel off the GenericUDFBridge that is added by CBO or
+        // user
+        if (expr1.getTypeInfo().equals(expr2.getTypeInfo())) {
+            expr1 = getColumnExpr(expr1);
+            expr2 = getColumnExpr(expr2);
+        }
+
+        ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair(expr1, expr2);
+        if (extracted == null || (extracted.length > 2 && !acceptsFields)) {
+            return expr;
+        }
+
+        ExprNodeColumnDesc columnDesc;
+        ExprNodeConstantDesc constantDesc;
+        if (extracted[0] instanceof ExprNodeConstantDesc) {
+            genericUDF = genericUDF.flip();
+            columnDesc = (ExprNodeColumnDesc) extracted[1];
+            constantDesc = (ExprNodeConstantDesc) extracted[0];
+        } else {
+            columnDesc = (ExprNodeColumnDesc) extracted[0];
+            constantDesc = (ExprNodeConstantDesc) extracted[1];
+        }
+
+        Set<String> allowed = columnToUDFs.get(columnDesc.getColumn());
+        if (allowed == null) {
+            return expr;
+        }
+
+        String udfName = genericUDF.getUdfName();
+        if (!allowed.contains(udfName)) {
+            return expr;
+        }
+
+        String[] fields = null;
+        if (extracted.length > 2) {
+            ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) extracted[2];
+            if (!isValidField(fieldDesc)) {
+                return expr;
+            }
+            fields = ExprNodeDescUtils.extractFields(fieldDesc);
+        }
+
+        // We also need to update the expr so that the index query can be
+        // generated.
+        // Note that, hive does not support UDFToDouble etc in the query text.
+        List<ExprNodeDesc> list = new ArrayList<ExprNodeDesc>();
+        list.add(expr1);
+        list.add(expr2);
+        expr = new ExprNodeGenericFuncDesc(expr.getTypeInfo(), expr.getGenericUDF(), list);
+
+        searchConditions.add(new IndexSearchCondition(columnDesc, udfName, constantDesc, expr,
+                fields));
+
+        // we converted the expression to a search condition, so
+        // remove it from the residual predicate
+        return fields == null ? null : expr;
+    }
+
+    private boolean isValidField(ExprNodeFieldDesc field) {
+        return fieldValidator == null || fieldValidator.validate(field);
+    }
+
+    /**
+     * Translates search conditions back to ExprNodeDesc form (as a left-deep
+     * conjunction).
+     *
+     * @param searchConditions (typically produced by analyzePredicate)
+     * @return ExprNodeGenericFuncDesc form of search conditions
+     */
+    public ExprNodeGenericFuncDesc translateSearchConditions(List<IndexSearchCondition>
+                                                                     searchConditions) {
+
+        ExprNodeGenericFuncDesc expr = null;
+
+        for (IndexSearchCondition searchCondition : searchConditions) {
+            if (expr == null) {
+                expr = searchCondition.getComparisonExpr();
+                continue;
+            }
+
+            List<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>();
+            children.add(expr);
+            children.add(searchCondition.getComparisonExpr());
+            expr = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, FunctionRegistry
+                    .getGenericUDFForAnd(), children);
+        }
+
+        return expr;
+    }
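+
+    // Example: three search conditions c1, c2, c3 are folded into ((c1 AND c2) AND c3),
+    // i.e. a left-deep conjunction of the original comparison expressions.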
+
+    public void setAcceptsFields(boolean acceptsFields) {
+        this.acceptsFields = acceptsFields;
+    }
+
+    public static interface FieldValidator {
+        boolean validate(ExprNodeFieldDesc exprNodeDesc);
+    }
+
+    public static IndexPredicateAnalyzer createAnalyzer(boolean equalOnly) {
+        IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
+        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual");
+
+        if (equalOnly) {
+            return analyzer;
+        }
+
+        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic" +
+                ".GenericUDFOPEqualOrGreaterThan");
+        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic" +
+                ".GenericUDFOPEqualOrLessThan");
+        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan");
+        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan");
+
+        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotEqual");
+        // apply !=
+        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween");
+        // apply (Not) Between
+        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn");        //
+        // apply (Not) In
+        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn");        //
+        // apply In
+        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull");
+        // apply Null
+        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull");
+        // apply Not Null
+
+        return analyzer;
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ql/index/IndexSearchCondition.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ql/index/IndexSearchCondition.java
new file mode 100644
index 0000000..0b5355c
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ql/index/IndexSearchCondition.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.ql.index;
+
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+import java.util.Arrays;
+
+/**
+ * IndexSearchCondition represents an individual search condition found by
+ * {@link IndexPredicateAnalyzer}.
+ *
+ */
+public class IndexSearchCondition {
+    private ExprNodeColumnDesc columnDesc;
+    private String comparisonOp;
+    private ExprNodeConstantDesc constantDesc;
+    private ExprNodeGenericFuncDesc comparisonExpr;
+
+    private String[] fields;
+
+    // Support (Not) Between/(Not) In Operator
+    private ExprNodeConstantDesc[] multiConstants;
+    private boolean isNot;
+
+    public IndexSearchCondition(ExprNodeColumnDesc columnDesc, String comparisonOp,
+                                ExprNodeConstantDesc[] multiConstants, ExprNodeGenericFuncDesc
+                                        comparisonExpr, boolean isNot) {
+        this(columnDesc, comparisonOp, multiConstants, comparisonExpr, null, isNot);
+    }
+
+    public IndexSearchCondition(ExprNodeColumnDesc columnDesc, String comparisonOp,
+                                ExprNodeConstantDesc[] multiConstants, ExprNodeGenericFuncDesc
+                                        comparisonExpr, String[] fields, boolean isNot) {
+        this.columnDesc = columnDesc;
+        this.comparisonOp = comparisonOp;
+        this.multiConstants = multiConstants;
+        this.comparisonExpr = comparisonExpr;
+        this.fields = fields;
+        this.isNot = isNot;
+    }
+
+    public ExprNodeConstantDesc[] getConstantDescs() {
+        return multiConstants;
+    }
+
+    public ExprNodeConstantDesc getConstantDesc(int index) {
+        return multiConstants[index];
+    }
+
+    public boolean isNot() {
+        return isNot;
+    }
+    //////////////////////////////////////////////////////////////////////////////
+
+    public IndexSearchCondition(ExprNodeColumnDesc columnDesc, String comparisonOp,
+                                ExprNodeConstantDesc constantDesc, ExprNodeGenericFuncDesc
+                                        comparisonExpr) {
+        this(columnDesc, comparisonOp, constantDesc, comparisonExpr, null);
+    }
+
+    /**
+     * Constructs a search condition, which takes the form
+     * <p>
+     * <pre>
+     * column-ref comparison-op constant-value
+     * </pre>
+     * <p>
+     * .
+     *
+     * @param columnDesc     column being compared
+     * @param comparisonOp   comparison operator, e.g. "=" (taken from
+     *                       GenericUDFBridge.getUdfName())
+     * @param constantDesc   constant value to search for
+     * @param comparisonExpr the original comparison expression
+     */
+    public IndexSearchCondition(ExprNodeColumnDesc columnDesc, String comparisonOp,
+                                ExprNodeConstantDesc constantDesc, ExprNodeGenericFuncDesc
+                                        comparisonExpr, String[] fields) {
+
+        this.columnDesc = columnDesc;
+        this.comparisonOp = comparisonOp;
+        this.constantDesc = constantDesc;
+        this.comparisonExpr = comparisonExpr;
+        this.fields = fields;
+    }
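+
+    // Illustrative example: for a Hive predicate "id = 10" the condition carries
+    //   columnDesc     - the column reference "id"
+    //   comparisonOp   - the UDF name registered for equality in IndexPredicateAnalyzer
+    //   constantDesc   - the constant 10
+    //   comparisonExpr - the original (id = 10) expression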
+
+    public void setColumnDesc(ExprNodeColumnDesc columnDesc) {
+        this.columnDesc = columnDesc;
+    }
+
+    public ExprNodeColumnDesc getColumnDesc() {
+        return columnDesc;
+    }
+
+    public void setComparisonOp(String comparisonOp) {
+        this.comparisonOp = comparisonOp;
+    }
+
+    public String getComparisonOp() {
+        return comparisonOp;
+    }
+
+    public void setConstantDesc(ExprNodeConstantDesc constantDesc) {
+        this.constantDesc = constantDesc;
+    }
+
+    public ExprNodeConstantDesc getConstantDesc() {
+        return constantDesc;
+    }
+
+    public void setComparisonExpr(ExprNodeGenericFuncDesc comparisonExpr) {
+        this.comparisonExpr = comparisonExpr;
+    }
+
+    public ExprNodeGenericFuncDesc getComparisonExpr() {
+        ExprNodeGenericFuncDesc ret = comparisonExpr;
+        try {
+            if (GenericUDFIn.class == comparisonExpr.getGenericUDF().getClass() && isNot) {
+                ret = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
+                        FunctionRegistry.getFunctionInfo("not").getGenericUDF(),
+                        Arrays.asList(comparisonExpr));
+            }
+        } catch (SemanticException e) {
+            throw new RuntimeException("hive operator -- never be thrown", e);
+        }
+        return ret;
+    }
+
+    public String[] getFields() {
+        return fields;
+    }
+
+    @Override
+    public String toString() {
+        return comparisonExpr.getExprString();
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ql/index/PredicateAnalyzerFactory.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ql/index/PredicateAnalyzerFactory.java
new file mode 100644
index 0000000..b6903b9
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/ql/index/PredicateAnalyzerFactory.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.ql.index;
+
+import org.apache.phoenix.hive.ql.index.IndexPredicateAnalyzer.FieldValidator;
+
+import java.util.List;
+
+public class PredicateAnalyzerFactory {
+    public static IndexPredicateAnalyzer createPredicateAnalyzer(List<String> ppdColumnList,
+                                                                 FieldValidator fieldValidator) {
+        // Create an analyzer for the conditions =, <, <=, >, >=
+        IndexPredicateAnalyzer analyzer = IndexPredicateAnalyzer.createAnalyzer(false);
+
+        for (String columnName : ppdColumnList) {
+            analyzer.allowColumnName(columnName);
+        }
+
+        analyzer.setAcceptsFields(true);
+        analyzer.setFieldValidator(fieldValidator);
+
+        return analyzer;
+    }
+
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/query/PhoenixQueryBuilder.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/query/PhoenixQueryBuilder.java
new file mode 100644
index 0000000..91aff1f
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/query/PhoenixQueryBuilder.java
@@ -0,0 +1,851 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.query;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.google.common.base.CharMatcher;
+import com.google.common.base.Splitter;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.hive.ql.index.IndexSearchCondition;
+import org.apache.phoenix.hive.util.ColumnMappingUtils;
+import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
+import org.apache.phoenix.hive.util.PhoenixUtil;
+import org.apache.phoenix.util.StringUtil;
+
+import static org.apache.phoenix.hive.util.ColumnMappingUtils.getColumnMappingMap;
+
+/**
+ * Query builder. Produces a Phoenix query based on the column list and the push-down conditions.
+ */
+
+public class PhoenixQueryBuilder {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixQueryBuilder.class);
+
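+    // $HINT$, $COLUMN_LIST$ and $TABLE_NAME$ are replaced when the query string is assembled.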
+    private static final String QUERY_TEMPLATE = "select $HINT$ $COLUMN_LIST$ from $TABLE_NAME$";
+
+    private static final PhoenixQueryBuilder QUERY_BUILDER = new PhoenixQueryBuilder();
+
+    private PhoenixQueryBuilder() {
+        if (LOG.isInfoEnabled()) {
+            LOG.info("PhoenixQueryBuilder created");
+        }
+    }
+
+    public static PhoenixQueryBuilder getInstance() {
+        return QUERY_BUILDER;
+    }
+
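+    // Ensures every column referenced in the where clause is also present in the select column list.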
+    private void addConditionColumnToReadColumn(List<String> readColumnList, List<String>
+            conditionColumnList) {
+        if (readColumnList.isEmpty()) {
+            return;
+        }
+
+        for (String conditionColumn : conditionColumnList) {
+            if (!readColumnList.contains(conditionColumn)) {
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("Condition column " + conditionColumn + " does not exist in " +
+                            "read-columns.");
+                }
+
+                readColumnList.add(conditionColumn);
+            }
+        }
+    }
+
+    private String makeQueryString(JobConf jobConf, String tableName, List<String>
+            readColumnList, String whereClause, String queryTemplate, String hints, Map<String,
+            TypeInfo> columnTypeMap) throws IOException {
+        StringBuilder sql = new StringBuilder();
+        List<String> conditionColumnList = buildWhereClause(jobConf, sql, whereClause, columnTypeMap);
+        readColumnList  = replaceColumns(jobConf, readColumnList);
+
+        if (conditionColumnList.size() > 0) {
+            addConditionColumnToReadColumn(readColumnList, conditionColumnList);
+            sql.insert(0, queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
+                    getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
+                    tableName));
+        } else {
+            sql.append(queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
+                    getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
+                    tableName));
+        }
+
+        if (LOG.isInfoEnabled()) {
+            LOG.info("Input query : " + sql.toString());
+        }
+
+        return sql.toString();
+    }
+
+    private static String findReplacement(JobConf jobConf, String column) {
+        Map<String, String> columnMappingMap = getColumnMappingMap(jobConf.get
+                (PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING));
+        if (columnMappingMap != null && columnMappingMap.containsKey(column)) {
+            return columnMappingMap.get(column);
+        } else {
+            return column;
+        }
+    }
+    private static List<String> replaceColumns(JobConf jobConf, List<String> columnList) {
+        Map<String, String> columnMappingMap = getColumnMappingMap(jobConf.get
+                (PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING));
+        if(columnMappingMap != null) {
+          List<String> newList = new ArrayList<>();
+            for(String column:columnList) {
+                if(columnMappingMap.containsKey(column)) {
+                    newList.add(columnMappingMap.get(column));
+                } else {
+                    newList.add(column);
+                }
+            }
+            return newList;
+        }
+        return null;
+    }
+
+    private String makeQueryString(JobConf jobConf, String tableName, List<String>
+            readColumnList, List<IndexSearchCondition> searchConditions, String queryTemplate,
+                                   String hints) throws IOException {
+        StringBuilder query = new StringBuilder();
+        List<String> conditionColumnList = buildWhereClause(jobConf, query, searchConditions);
+
+        if (conditionColumnList.size() > 0) {
+            readColumnList  = replaceColumns(jobConf, readColumnList);
+            addConditionColumnToReadColumn(readColumnList, conditionColumnList);
+            query.insert(0, queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
+                    getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
+                    tableName));
+        } else {
+            readColumnList  = replaceColumns(jobConf, readColumnList);
+            query.append(queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
+                    getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
+                    tableName));
+        }
+
+        if (LOG.isInfoEnabled()) {
+            LOG.info("Input query : " + query.toString());
+        }
+
+        return query.toString();
+    }
+
+    private String getSelectColumns(JobConf jobConf, String tableName, List<String>
+            readColumnList) throws IOException {
+        String selectColumns = String.join(PhoenixStorageHandlerConstants.COMMA,
+            ColumnMappingUtils.quoteColumns(readColumnList));
+        if (PhoenixStorageHandlerConstants.EMPTY_STRING.equals(selectColumns)) {
+            selectColumns = "*";
+        } else {
+            if (PhoenixStorageHandlerUtil.isTransactionalTable(jobConf)) {
+                List<String> pkColumnList = PhoenixUtil.getPrimaryKeyColumnList(jobConf, tableName);
+                StringBuilder pkColumns = new StringBuilder();
+
+                for (String pkColumn : pkColumnList) {
+                    if (!readColumnList.contains(pkColumn)) {
+                        pkColumns.append("\"").append(pkColumn).append("\"" + PhoenixStorageHandlerConstants.COMMA);
+                    }
+                }
+
+                selectColumns = pkColumns.toString() + selectColumns;
+            }
+        }
+
+        return selectColumns;
+    }
+
+    public String buildQuery(JobConf jobConf, String tableName, List<String> readColumnList,
+                             String whereClause, Map<String, TypeInfo> columnTypeMap) throws
+            IOException {
+        String hints = getHint(jobConf, tableName);
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Building query with columns : " + readColumnList + " table name : " +
+                    tableName + "  with where conditions : " + whereClause + "  hints : " + hints);
+        }
+
+        return makeQueryString(jobConf, tableName, new ArrayList<>(readColumnList),
+                whereClause, QUERY_TEMPLATE, hints, columnTypeMap);
+    }
+
+    public String buildQuery(JobConf jobConf, String tableName, List<String> readColumnList,
+                             List<IndexSearchCondition> searchConditions) throws IOException {
+        String hints = getHint(jobConf, tableName);
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Building query with columns : " + readColumnList + "  table name : " +
+                    tableName + " search conditions : " + searchConditions + "  hints : " + hints);
+        }
+
+        return makeQueryString(jobConf, tableName,  new ArrayList<>(readColumnList),
+                searchConditions, QUERY_TEMPLATE, hints);
+    }
+
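+    // Builds the Phoenix hint block, e.g. "/*+ NO_CACHE */" when block caching is disabled for the scan.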
+    private String getHint(JobConf jobConf, String tableName) {
+        StringBuilder hints = new StringBuilder("/*+ ");
+        if (!jobConf.getBoolean(PhoenixStorageHandlerConstants.HBASE_SCAN_CACHEBLOCKS, Boolean
+                .FALSE)) {
+            hints.append("NO_CACHE ");
+        }
+
+        String queryHint = jobConf.get(tableName + PhoenixStorageHandlerConstants
+                .PHOENIX_TABLE_QUERY_HINT);
+        if (queryHint != null) {
+            hints.append(queryHint);
+        }
+        hints.append(" */");
+
+        return hints.toString();
+    }
+
+    private List<String> buildWhereClause(JobConf jobConf, StringBuilder sql, String whereClause,
+                                          Map<String, TypeInfo> columnTypeMap) throws IOException {
+        if (whereClause == null || whereClause.isEmpty()) {
+            return Collections.emptyList();
+        }
+
+        List<String> conditionColumnList = new ArrayList<>();
+        sql.append(" where ");
+
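+        // Hive renders string casts as UDFToString; rewrite them to Phoenix's TO_CHAR function.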
+        whereClause = StringUtils.replaceEach(whereClause, new String[]{"UDFToString"}, new
+                String[]{"to_char"});
+
+        for (String columnName : columnTypeMap.keySet()) {
+            if (whereClause.contains(columnName)) {
+                String column = findReplacement(jobConf, columnName);
+                whereClause = whereClause.replaceAll("\\b" + columnName + "\\b", "\"" + column + "\"");
+                conditionColumnList.add(column);
+
+
+                if (PhoenixStorageHandlerConstants.DATE_TYPE.equals(
+                        columnTypeMap.get(columnName).getTypeName())) {
+                    whereClause = applyDateFunctionUsingRegex(whereClause, column);
+                } else if (PhoenixStorageHandlerConstants.TIMESTAMP_TYPE.equals(
+                        columnTypeMap.get(columnName).getTypeName())) {
+                    whereClause = applyTimestampFunctionUsingRegex(whereClause, column);
+                }
+            }
+        }
+
+        sql.append(whereClause);
+
+        return conditionColumnList;
+    }
+
+    private String applyDateFunctionUsingRegex(String whereClause, String columnName) {
+        whereClause = applyFunctionForCommonOperator(whereClause, columnName, true);
+        whereClause = applyFunctionForBetweenOperator(whereClause, columnName, true);
+        whereClause = applyFunctionForInOperator(whereClause, columnName, true);
+
+        return whereClause;
+    }
+
+    private String applyTimestampFunctionUsingRegex(String whereClause, String columnName) {
+        whereClause = applyFunctionForCommonOperator(whereClause, columnName, false);
+        whereClause = applyFunctionForBetweenOperator(whereClause, columnName, false);
+        whereClause = applyFunctionForInOperator(whereClause, columnName, false);
+
+        return whereClause;
+    }
+
+    private String applyFunctionForCommonOperator(String whereClause, String columnName, boolean
+            isDate) {
+        String targetPattern = isDate ? PhoenixStorageHandlerConstants.DATE_PATTERN :
+                PhoenixStorageHandlerConstants.TIMESTAMP_PATTERN;
+        String pattern = StringUtils.replaceEach(PhoenixStorageHandlerConstants
+                        .COMMON_OPERATOR_PATTERN,
+                new String[]{PhoenixStorageHandlerConstants.COLUMNE_MARKER,
+                        PhoenixStorageHandlerConstants.PATERN_MARKER}, new String[]{columnName,
+                        targetPattern});
+
+        Matcher matcher = Pattern.compile(pattern).matcher(whereClause);
+
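+        // group(1) is the whole comparison expression, group(3) the date/timestamp literal to be wrapped.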
+        while (matcher.find()) {
+            String token = matcher.group(1);
+            String datePart = matcher.group(3);
+
+            String convertString = token.replace(datePart, applyFunction(isDate ?
+                    PhoenixStorageHandlerConstants.DATE_FUNCTION_TEMPLETE :
+                    PhoenixStorageHandlerConstants.TIMESTAMP_FUNCTION_TEMPLATE, datePart));
+            whereClause = whereClause.replaceAll(StringUtils.replaceEach(token, new String[]{"(",
+                    ")"}, new String[]{"\\(", "\\)"}), convertString);
+        }
+
+        return whereClause;
+    }
+
+    private String applyFunctionForBetweenOperator(String whereClause, String columnName, boolean
+            isDate) {
+        String targetPattern = isDate ? PhoenixStorageHandlerConstants.DATE_PATTERN :
+                PhoenixStorageHandlerConstants.TIMESTAMP_PATTERN;
+        String pattern = StringUtils.replaceEach(PhoenixStorageHandlerConstants
+                        .BETWEEN_OPERATOR_PATTERN,
+                new String[]{PhoenixStorageHandlerConstants.COLUMNE_MARKER,
+                        PhoenixStorageHandlerConstants.PATERN_MARKER}, new String[]{columnName,
+                        targetPattern});
+
+        Matcher matcher = Pattern.compile(pattern).matcher(whereClause);
+
+        while (matcher.find()) {
+            String token = matcher.group(1);
+            boolean isNot = matcher.group(2) != null;
+            String fromDate = matcher.group(3);
+            String toDate = matcher.group(4);
+
+            String convertString = StringUtils.replaceEach(token, new String[]{fromDate, toDate},
+                    new String[]{applyFunction(isDate ? PhoenixStorageHandlerConstants
+                            .DATE_FUNCTION_TEMPLETE : PhoenixStorageHandlerConstants
+                            .TIMESTAMP_FUNCTION_TEMPLATE, fromDate),
+                            applyFunction(isDate ? PhoenixStorageHandlerConstants
+                                    .DATE_FUNCTION_TEMPLETE : PhoenixStorageHandlerConstants
+                                    .TIMESTAMP_FUNCTION_TEMPLATE, toDate)});
+
+            whereClause = whereClause.replaceAll(pattern, convertString);
+        }
+
+        return whereClause;
+    }
+
+    private String applyFunctionForInOperator(String whereClause, String columnName, boolean
+            isDate) {
+        String targetPattern = isDate ? PhoenixStorageHandlerConstants.DATE_PATTERN :
+                PhoenixStorageHandlerConstants.TIMESTAMP_PATTERN;
+        String pattern = StringUtils.replaceEach(PhoenixStorageHandlerConstants.IN_OPERATOR_PATTERN,
+                new String[]{PhoenixStorageHandlerConstants.COLUMNE_MARKER,
+                        PhoenixStorageHandlerConstants.PATERN_MARKER}, new String[]{columnName,
+                        targetPattern});
+        String itemPattern = "(" + targetPattern + ")";
+
+        Matcher matcher = Pattern.compile(pattern).matcher(whereClause);
+
+        while (matcher.find()) {
+            String token = matcher.group(1);
+            Matcher itemMatcher = Pattern.compile(itemPattern).matcher(token);
+            while (itemMatcher.find()) {
+                String item = itemMatcher.group(1);
+
+                token = token.replace(item, applyFunction(isDate ? PhoenixStorageHandlerConstants
+                        .DATE_FUNCTION_TEMPLETE : PhoenixStorageHandlerConstants
+                        .TIMESTAMP_FUNCTION_TEMPLATE, item));
+            }
+
+            whereClause = whereClause.replaceAll(pattern, token);
+        }
+
+        return whereClause;
+    }
+
+    /**
+     * Substitutes the value into the given function template.
+     * If the pattern is to_date($value$) and the value is '2016-01-15', the result is to_date('2016-01-15').
+     * If the pattern is cast($value$ as date) and the value is '2016-01-15', the result is
+     * cast('2016-01-15' as date).
+     */
+    private String applyFunction(String pattern, String value) {
+        if (!value.startsWith(PhoenixStorageHandlerConstants.QUOTATION_MARK)) {
+            value = PhoenixStorageHandlerConstants.QUOTATION_MARK + value +
+                    PhoenixStorageHandlerConstants.QUOTATION_MARK;
+        }
+
+        return pattern.replace(PhoenixStorageHandlerConstants.FUNCTION_VALUE_MARKER, value);
+    }
+
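+    // Ensures the compare value is single-quoted before it is passed to to_date()/to_timestamp().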
+    private String getCompareValueForDateAndTimestampFunction(String compareValue) {
+        if (compareValue.startsWith(PhoenixStorageHandlerConstants.QUOTATION_MARK)) {
+            return compareValue;
+        } else {
+            return PhoenixStorageHandlerConstants.QUOTATION_MARK + compareValue +
+                    PhoenixStorageHandlerConstants.QUOTATION_MARK;
+        }
+    }
+
+    private String applyDateFunction(String whereClause, String columnName) {
+        StringBuilder whereCondition = new StringBuilder();
+        for (Iterator<String> iterator = Splitter.on(CharMatcher.WHITESPACE).omitEmptyStrings()
+                .split(whereClause).iterator(); iterator.hasNext(); whereCondition.append
+                (PhoenixStorageHandlerConstants.SPACE)) {
+            String token = iterator.next();
+            if (isMyCondition(columnName, token)) {
+                whereCondition.append(token);
+
+                String comparator = iterator.next();
+                whereCondition.append(PhoenixStorageHandlerConstants.SPACE);
+                whereCondition.append(comparator).append(PhoenixStorageHandlerConstants.SPACE);
+                if (PhoenixStorageHandlerConstants.BETWEEN_COMPARATOR.equalsIgnoreCase
+                        (comparator)) {
+                    whereCondition.append("to_date(").append
+                            (getCompareValueForDateAndTimestampFunction(iterator.next())).append
+                            (") ").append(iterator.next()).append(PhoenixStorageHandlerConstants
+                            .SPACE)
+                            .append("to_date(");
+
+                    String toCompareValue = iterator.next();
+                    if (toCompareValue.endsWith(PhoenixStorageHandlerConstants
+                            .RIGHT_ROUND_BRACKET)) {
+                        int rightBracketIndex = toCompareValue.indexOf
+                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
+                        whereCondition.append(getCompareValueForDateAndTimestampFunction
+                                (toCompareValue.substring(0, rightBracketIndex))).append
+                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET).append
+                                (toCompareValue.substring(rightBracketIndex));
+                    } else {
+                        whereCondition.append(getCompareValueForDateAndTimestampFunction
+                                (toCompareValue)).append(PhoenixStorageHandlerConstants
+                                .RIGHT_ROUND_BRACKET);
+                    }
+                } else if (PhoenixStorageHandlerConstants.IN_COMPARATOR.equalsIgnoreCase
+                        (comparator)) {
+                    while (iterator.hasNext()) {
+                        String aToken = iterator.next();
+                        if (aToken.equals(PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET) ||
+                                aToken.equals(PhoenixStorageHandlerConstants.COMMA)) {
+                            whereCondition.append(aToken);
+                        } else if (aToken.equals(PhoenixStorageHandlerConstants
+                                .RIGHT_ROUND_BRACKET)) {
+                            whereCondition.append(aToken);
+                            break;
+                        } else if (aToken.endsWith(PhoenixStorageHandlerConstants
+                                .RIGHT_ROUND_BRACKET)) {
+                            int bracketIndex = aToken.indexOf(PhoenixStorageHandlerConstants
+                                    .RIGHT_ROUND_BRACKET);
+                            whereCondition.append("to_date(").append
+                                    (getCompareValueForDateAndTimestampFunction(aToken.substring
+                                            (0, bracketIndex))).append
+                                    (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET).append
+                                    (aToken.substring(bracketIndex));
+                            break;
+                        } else if (aToken.endsWith(PhoenixStorageHandlerConstants.COMMA)) {
+                            if (aToken.startsWith(PhoenixStorageHandlerConstants
+                                    .LEFT_ROUND_BRACKET)) {
+                                int bracketIndex = aToken.lastIndexOf
+                                        (PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET);
+                                whereCondition.append(aToken.substring(0, bracketIndex + 1))
+                                        .append("to_date(").append
+                                        (getCompareValueForDateAndTimestampFunction(aToken
+                                                .substring(bracketIndex + 1, aToken.length() - 1)
+                                        )).append("),");
+                            } else {
+                                whereCondition.append("to_date(").append
+                                        (getCompareValueForDateAndTimestampFunction(aToken
+                                                .substring(0, aToken.length() - 1))).append("),");
+                            }
+                        }
+
+                        whereCondition.append(PhoenixStorageHandlerConstants.SPACE);
+                    }
+                } else if (PhoenixStorageHandlerConstants.COMMON_COMPARATOR.contains(comparator)) {
+                    String compareValue = getCompareValueForDateAndTimestampFunction(iterator
+                            .next());
+                    whereCondition.append("to_date(");
+                    if (compareValue.endsWith(PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET)) {
+                        int rightBracketIndex = compareValue.indexOf
+                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
+                        whereCondition.append(getCompareValueForDateAndTimestampFunction
+                                (compareValue.substring(0, rightBracketIndex))).append
+                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET).append
+                                (compareValue.substring(rightBracketIndex));
+                    } else {
+                        whereCondition.append(getCompareValueForDateAndTimestampFunction
+                                (compareValue)).append(PhoenixStorageHandlerConstants
+                                .RIGHT_ROUND_BRACKET);
+                    }
+                }
+            } else {
+                whereCondition.append(token);
+            }
+        }
+
+        return whereCondition.toString();
+    }
+
+    // Assumes the timestamp literal is formatted as yyyy-MM-dd HH:mm:ss.SSS
+    private String applyTimestampFunction(String whereClause, String columnName) {
+        StringBuilder whereCondition = new StringBuilder();
+        for (Iterator<String> iterator = Splitter.on(CharMatcher.WHITESPACE).omitEmptyStrings()
+                .split(whereClause).iterator(); iterator.hasNext(); whereCondition.append
+                (PhoenixStorageHandlerConstants.SPACE)) {
+            String token = iterator.next();
+            if (isMyCondition(columnName, token)) {
+                whereCondition.append(token);
+
+                String comparator = iterator.next();
+                whereCondition.append(PhoenixStorageHandlerConstants.SPACE);
+                whereCondition.append(comparator).append(PhoenixStorageHandlerConstants.SPACE);
+                if (PhoenixStorageHandlerConstants.BETWEEN_COMPARATOR.equalsIgnoreCase
+                        (comparator)) {
+                    String fromCompareValue = iterator.next() + PhoenixStorageHandlerConstants
+                            .SPACE + iterator.next();
+                    whereCondition.append("to_timestamp(").append
+                            (getCompareValueForDateAndTimestampFunction(fromCompareValue)).append
+                            (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
+                    whereCondition.append(PhoenixStorageHandlerConstants.SPACE).append(iterator
+                            .next()).append(PhoenixStorageHandlerConstants.SPACE);
+                    whereCondition.append("to_timestamp(");
+
+                    String toCompareValue = iterator.next() + PhoenixStorageHandlerConstants
+                            .SPACE + iterator.next();
+                    if (toCompareValue.endsWith(PhoenixStorageHandlerConstants
+                            .RIGHT_ROUND_BRACKET)) {
+                        int rightBracketIndex = toCompareValue.indexOf
+                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
+                        whereCondition.append(getCompareValueForDateAndTimestampFunction
+                                (toCompareValue.substring(0, rightBracketIndex))).append
+                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET).append
+                                (toCompareValue.substring(rightBracketIndex));
+                    } else {
+                        whereCondition.append(getCompareValueForDateAndTimestampFunction
+                                (toCompareValue)).append(PhoenixStorageHandlerConstants
+                                .RIGHT_ROUND_BRACKET);
+                    }
+                } else if (PhoenixStorageHandlerConstants.IN_COMPARATOR.equalsIgnoreCase
+                        (comparator)) {
+                    while (iterator.hasNext()) {
+                        String aToken = iterator.next();
+                        if (aToken.equals(PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET) ||
+                                aToken.equals(PhoenixStorageHandlerConstants.COMMA)) {
+                            whereCondition.append(aToken);
+                        } else if (aToken.equals(PhoenixStorageHandlerConstants
+                                .RIGHT_ROUND_BRACKET)) {
+                            whereCondition.append(aToken);
+                            break;
+                        } else {
+                            String compareValue = aToken + PhoenixStorageHandlerConstants.SPACE +
+                                    iterator.next();
+
+                            if (compareValue.startsWith(PhoenixStorageHandlerConstants
+                                    .LEFT_ROUND_BRACKET)) {
+                                int leftBracketIndex = compareValue.lastIndexOf
+                                        (PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET);
+                                whereCondition.append(compareValue.substring(0, leftBracketIndex
+                                        + 1)).append("to_timestamp(");
+
+                                if (compareValue.endsWith(PhoenixStorageHandlerConstants
+                                        .RIGHT_ROUND_BRACKET)) {
+                                    int rightBracketIndex = compareValue.indexOf
+                                            (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
+                                    whereCondition.append
+                                            (getCompareValueForDateAndTimestampFunction
+                                                    (compareValue.substring(leftBracketIndex + 1,
+                                                            rightBracketIndex)))
+                                            .append(PhoenixStorageHandlerConstants
+                                                    .RIGHT_ROUND_BRACKET).append(compareValue
+                                            .substring(rightBracketIndex));
+                                } else if (compareValue.endsWith(PhoenixStorageHandlerConstants
+                                        .COMMA)) {
+                                    whereCondition.append
+                                            (getCompareValueForDateAndTimestampFunction
+                                                    (compareValue.substring(leftBracketIndex + 1,
+                                                            compareValue.length() - 1)))
+                                            .append(PhoenixStorageHandlerConstants
+                                                    .RIGHT_ROUND_BRACKET).append
+                                            (PhoenixStorageHandlerConstants.COMMA);
+                                } else {
+                                    whereCondition.append
+                                            (getCompareValueForDateAndTimestampFunction
+                                                    (compareValue.substring(leftBracketIndex + 1)
+                                                    )).append(PhoenixStorageHandlerConstants
+                                            .RIGHT_ROUND_BRACKET);
+                                }
+                            } else if (compareValue.endsWith(PhoenixStorageHandlerConstants
+                                    .RIGHT_ROUND_BRACKET)) {
+                                int rightBracketIndex = compareValue.indexOf
+                                        (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
+                                whereCondition.append("to_timestamp(").append
+                                        (getCompareValueForDateAndTimestampFunction(compareValue
+                                                .substring(0, rightBracketIndex)))
+                                        .append(PhoenixStorageHandlerConstants
+                                                .RIGHT_ROUND_BRACKET).append(compareValue
+                                        .substring(rightBracketIndex));
+                                break;
+                            } else if (compareValue.endsWith(PhoenixStorageHandlerConstants
+                                    .COMMA)) {
+                                whereCondition.append("to_timestamp(").append
+                                        (getCompareValueForDateAndTimestampFunction(compareValue
+                                                .substring(0, compareValue.length() - 1))).append
+                                        ("),");
+                            }
+                        }
+
+                        whereCondition.append(PhoenixStorageHandlerConstants.SPACE);
+                    }
+                } else if (PhoenixStorageHandlerConstants.COMMON_COMPARATOR.contains(comparator)) {
+                    String timestampValue = iterator.next() + PhoenixStorageHandlerConstants
+                            .SPACE + iterator.next();
+                    whereCondition.append("to_timestamp(");
+                    if (timestampValue.endsWith(PhoenixStorageHandlerConstants
+                            .RIGHT_ROUND_BRACKET)) {
+                        int rightBracketIndex = timestampValue.indexOf
+                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
+                        whereCondition.append(getCompareValueForDateAndTimestampFunction
+                                (timestampValue.substring(0, rightBracketIndex))).append
+                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET).append
+                                (timestampValue.substring(rightBracketIndex));
+                    } else {
+                        whereCondition.append(getCompareValueForDateAndTimestampFunction
+                                (timestampValue)).append(PhoenixStorageHandlerConstants
+                                .RIGHT_ROUND_BRACKET);
+                    }
+                }
+            } else {
+                whereCondition.append(token);
+            }
+        }
+
+        return whereCondition.toString();
+    }
+
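+    // Returns true if the token references columnName, optionally wrapped in round brackets.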
+    private boolean isMyCondition(String columnName, String token) {
+        boolean itsMine = false;
+
+        if (columnName.equals(token)) {
+            itsMine = true;
+        } else if (token.startsWith(PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET) && token
+                .substring(token.lastIndexOf(PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET) +
+                        1).equals(columnName)) {
+            itsMine = true;
+        } else if (token.startsWith(PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET) && token
+                .endsWith(PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET)
+                && token.substring(token.lastIndexOf(PhoenixStorageHandlerConstants
+                .LEFT_ROUND_BRACKET) + 1, token.indexOf(PhoenixStorageHandlerConstants
+                .RIGHT_ROUND_BRACKET)).equals(columnName)) {
+            itsMine = true;
+        }
+
+        return itsMine;
+    }
+
+    protected List<String> buildWhereClause(JobConf jobConf, StringBuilder sql,
+                                            List<IndexSearchCondition> conditions)
+            throws IOException {
+        if (conditions == null || conditions.size() == 0) {
+            return Collections.emptyList();
+        }
+
+        List<String> columns = new ArrayList<>();
+        sql.append(" where ");
+
+        Iterator<IndexSearchCondition> iter = conditions.iterator();
+        appendExpression(jobConf, sql, iter.next(), columns);
+        while (iter.hasNext()) {
+            sql.append(" and ");
+            appendExpression(jobConf, sql, iter.next(), columns);
+        }
+
+        return columns;
+    }
+
+    private void appendExpression(JobConf jobConf, StringBuilder sql, IndexSearchCondition condition,
+                                  List<String> columns) {
+        Expression expr = findExpression(condition);
+        if (expr != null) {
+            sql.append(expr.buildExpressionStringFrom(jobConf, condition));
+            String column = condition.getColumnDesc().getColumn();
+            String rColumn = findReplacement(jobConf, column);
+            if(rColumn != null) {
+                column = rColumn;
+            }
+
+            columns.add(column);
+        }
+    }
+
+    private Expression findExpression(final IndexSearchCondition condition) {
+        for (Expression exp:Expression.values()) {
+            if(exp.isFor(condition)){
+                return exp;
+            }
+        }
+        return null;
+    }
+
+    private static final StrJoiner JOINER_COMMA = new StrJoiner(", ");
+    private static final StrJoiner JOINER_AND = new StrJoiner(" and ");
+    private static final StrJoiner JOINER_SPACE = new StrJoiner(" ");
+
+    private static class StrJoiner{
+        private String delimiter;
+
+        StrJoiner(String delimiter){
+            this.delimiter = delimiter;
+        }
+        public String join(List<String> list){
+            return String.join(this.delimiter,list);
+        }
+    }
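+
+    // Maps Hive comparison UDF names to the corresponding Phoenix SQL operators.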
+    private enum Expression {
+        EQUAL("UDFOPEqual", "="),
+        GREATER_THAN_OR_EQUAL_TO("UDFOPEqualOrGreaterThan", ">="),
+        GREATER_THAN("UDFOPGreaterThan", ">"),
+        LESS_THAN_OR_EQUAL_TO("UDFOPEqualOrLessThan", "<="),
+        LESS_THAN("UDFOPLessThan", "<"),
+        NOT_EQUAL("UDFOPNotEqual", "!="),
+        BETWEEN("GenericUDFBetween", "between", JOINER_AND,true) {
+            public boolean checkCondition(IndexSearchCondition condition) {
+                return condition.getConstantDescs() != null;
+            }
+        },
+        IN("GenericUDFIn", "in", JOINER_COMMA,true) {
+            public boolean checkCondition(IndexSearchCondition condition) {
+                return condition.getConstantDescs() != null;
+            }
+
+            public String createConstants(final String typeName, ExprNodeConstantDesc[] desc) {
+                return "(" + super.createConstants(typeName, desc) + ")";
+            }
+        },
+        IS_NULL("GenericUDFOPNull", "is null") {
+            public boolean checkCondition(IndexSearchCondition condition) {
+                return true;
+            }
+        },
+        IS_NOT_NULL("GenericUDFOPNotNull", "is not null") {
+            public boolean checkCondition(IndexSearchCondition condition) {
+                return true;
+            }
+        };
+
+        private final String hiveCompOp;
+        private final String sqlCompOp;
+        private final StrJoiner joiner;
+        private final boolean supportNotOperator;
+
+        Expression(String hiveCompOp, String sqlCompOp) {
+            this(hiveCompOp, sqlCompOp, null, false);
+        }
+
+        Expression(String hiveCompOp, String sqlCompOp, StrJoiner joiner, boolean supportNotOp) {
+            this.hiveCompOp = hiveCompOp;
+            this.sqlCompOp = sqlCompOp;
+            this.joiner = joiner;
+            this.supportNotOperator = supportNotOp;
+        }
+
+        public boolean checkCondition(IndexSearchCondition condition) {
+            return condition.getConstantDesc().getValue() != null;
+        }
+
+        public boolean isFor(IndexSearchCondition condition) {
+            return condition.getComparisonOp().endsWith(hiveCompOp) && checkCondition(condition);
+        }
+
+        public String buildExpressionStringFrom(JobConf jobConf, IndexSearchCondition condition) {
+            final String type = condition.getColumnDesc().getTypeString();
+            String column = condition.getColumnDesc().getColumn();
+            String rColumn = findReplacement(jobConf, column);
+            if(rColumn != null) {
+                column = rColumn;
+            }
+            return String.join(" ",
+                    "\"" + column + "\"",
+                    getSqlCompOpString(condition),
+                    joiner != null ? createConstants(type, condition.getConstantDescs()) :
+                            createConstant(type, condition.getConstantDesc()));
+        }
+
+        public String getSqlCompOpString(IndexSearchCondition condition) {
+            return supportNotOperator ?
+                    (condition.isNot() ? "not " : "") + sqlCompOp : sqlCompOp;
+        }
+
+        public String createConstant(String typeName, ExprNodeConstantDesc constantDesc) {
+            if (constantDesc == null) {
+                return StringUtil.EMPTY_STRING;
+            }
+
+            return createConstantString(typeName, String.valueOf(constantDesc.getValue()));
+        }
+
+        public String createConstants(final String typeName, ExprNodeConstantDesc[] constantDesc) {
+            if (constantDesc == null) {
+                return StringUtil.EMPTY_STRING;
+            }
+            List<String> constants = new ArrayList<>();
+            for (ExprNodeConstantDesc s:constantDesc) {
+                constants.add(createConstantString(typeName, String.valueOf(s.getValue())));
+            }
+            return joiner.join(constants);
+        }
+
+        private static class ConstantStringWrapper {
+            private List<String> types;
+            private String prefix;
+            private String postfix;
+
+            ConstantStringWrapper(String type, String prefix, String postfix) {
+                this(new ArrayList<>(Arrays.asList(type)), prefix, postfix);
+            }
+
+            ConstantStringWrapper(List<String> types, String prefix, String postfix) {
+                this.types = types;
+                this.prefix = prefix;
+                this.postfix = postfix;
+            }
+
+            public String apply(final String typeName, String value) {
+                boolean hasMatch = false;
+                for (String type:types){
+                    if (typeName.startsWith(type)) {
+                        hasMatch = true;
+                        break;
+                    }
+                }
+                return hasMatch ? prefix + value + postfix : value;
+            }
+        }
+
+        private static final String SINGLE_QUOTATION = "'";
+        private static final List<ConstantStringWrapper> WRAPPERS = new ArrayList<>(Arrays.asList(
+                new ConstantStringWrapper(new ArrayList<>(Arrays.asList(
+                        serdeConstants.STRING_TYPE_NAME, serdeConstants.CHAR_TYPE_NAME,
+                        serdeConstants.VARCHAR_TYPE_NAME, serdeConstants.DATE_TYPE_NAME,
+                        serdeConstants.TIMESTAMP_TYPE_NAME)
+                ), SINGLE_QUOTATION, SINGLE_QUOTATION),
+                new ConstantStringWrapper(serdeConstants.DATE_TYPE_NAME, "to_date(", ")"),
+                new ConstantStringWrapper(serdeConstants.TIMESTAMP_TYPE_NAME, "to_timestamp(", ")"))
+        );
+
+        private String createConstantString(String typeName, String value) {
+            for (ConstantStringWrapper wrapper : WRAPPERS) {
+                value = wrapper.apply(typeName, value);
+            }
+
+            return value;
+        }
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/ColumnMappingUtils.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/ColumnMappingUtils.java
new file mode 100644
index 0000000..79c4b43
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/ColumnMappingUtils.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.hive.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+
+import java.util.*;
+
+/**
+ * Utility methods for mapping between Hive and Phoenix column names. The mapping is read from the
+ * phoenix.column.mapping table property as a comma-separated list of hiveColumn:phoenixColumn pairs.
+ */
+public class ColumnMappingUtils {
+
+    private static final Log LOG = LogFactory.getLog(ColumnMappingUtils.class);
+
+    public static Map<String, String> getColumnMappingMap(String columnMappings) {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Column mappings : " + columnMappings);
+        }
+
+        if (columnMappings == null || columnMappings.length() == 0) {
+            if (LOG.isInfoEnabled()) {
+                LOG.info("phoenix.column.mapping is not set; using the Hive field definitions");
+            }
+
+            return Collections.emptyMap();
+        }
+
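+        // Each mapping entry has the form <hive column>:<phoenix column>.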
+        Map<String, String> columnMappingMap  = new HashMap<>();
+        for (String item : columnMappings.split(PhoenixStorageHandlerConstants.COMMA)) {
+            String[] kv = item.trim().split(PhoenixStorageHandlerConstants.COLON);
+            columnMappingMap.put(kv[0], kv[1].length() > 1 ? kv[1] : "");
+        }
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Column mapping map : " + columnMappingMap);
+        }
+
+        return columnMappingMap;
+    }
+
+    public static Map<String, String> getReverseColumnMapping(String columnMapping) {
+        Map<String, String> myNewHashMap = new LinkedHashMap<>();
+        Map<String, String> forward = getColumnMappingMap(columnMapping);
+        for(Map.Entry<String, String> entry : forward.entrySet()){
+            myNewHashMap.put(entry.getValue(), entry.getKey());
+        }
+        return myNewHashMap;
+    }
+
+    public static List<String> quoteColumns(List<String> readColumnList) {
+        List<String> newList = new LinkedList<>();
+        for(String column : readColumnList) {
+            newList.add("\""+ column + "\"");
+        }
+        return newList;
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
new file mode 100644
index 0000000..d5eb86f
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.util;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+
+/**
+ * Utility methods for obtaining a Phoenix JDBC Connection from the job or table configuration.
+ */
+
+public class PhoenixConnectionUtil {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixConnectionUtil.class);
+
+    public static Connection getInputConnection(final Configuration conf, final Properties props)
+            throws SQLException {
+        String quorum = conf.get(PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM);
+        quorum = quorum == null ? props.getProperty(PhoenixStorageHandlerConstants
+                .ZOOKEEPER_QUORUM, PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_QUORUM) :
+                quorum;
+
+        int zooKeeperClientPort = conf.getInt(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT, 0);
+        zooKeeperClientPort = zooKeeperClientPort == 0 ?
+                Integer.parseInt(props.getProperty(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT,
+                        String.valueOf(PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_PORT))) :
+                zooKeeperClientPort;
+
+        String zNodeParent = conf.get(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT);
+        zNodeParent = zNodeParent == null ? props.getProperty(PhoenixStorageHandlerConstants
+                .ZOOKEEPER_PARENT, PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_PARENT) :
+                zNodeParent;
+
+        return getConnection(quorum, zooKeeperClientPort, zNodeParent, PropertiesUtil
+                .combineProperties(props, conf));
+    }
+
+    public static Connection getConnection(final Table table) throws SQLException {
+        Map<String, String> tableParameterMap = table.getParameters();
+
+        String zookeeperQuorum = tableParameterMap.get(PhoenixStorageHandlerConstants
+                .ZOOKEEPER_QUORUM);
+        zookeeperQuorum = zookeeperQuorum == null ? PhoenixStorageHandlerConstants
+                .DEFAULT_ZOOKEEPER_QUORUM : zookeeperQuorum;
+
+        String clientPortString = tableParameterMap.get(PhoenixStorageHandlerConstants
+                .ZOOKEEPER_PORT);
+        int clientPort = clientPortString == null ? PhoenixStorageHandlerConstants
+                .DEFAULT_ZOOKEEPER_PORT : Integer.parseInt(clientPortString);
+
+        String zNodeParent = tableParameterMap.get(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT);
+        zNodeParent = zNodeParent == null ? PhoenixStorageHandlerConstants
+                .DEFAULT_ZOOKEEPER_PARENT : zNodeParent;
+        try {
+            Class.forName("org.apache.phoenix.jdbc.PhoenixDriver");
+        } catch (ClassNotFoundException e) {
+            LOG.warn("org.apache.phoenix.jdbc.PhoenixDriver not found on the classpath", e);
+        }
+        return DriverManager.getConnection(QueryUtil.getUrl(zookeeperQuorum, clientPort,
+                zNodeParent));
+    }
+
+    private static Connection getConnection(final String quorum, final Integer clientPort, String
+            zNodeParent, Properties props) throws SQLException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Connection attrs [quorum, port, znode] : " + quorum + ", " + clientPort +
+                    ", " +
+                    zNodeParent);
+        }
+
+        return DriverManager.getConnection(clientPort != null ? QueryUtil.getUrl(quorum,
+                clientPort, zNodeParent) : QueryUtil.getUrl(quorum), props);
+    }
+
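+    // Copies the storage handler's ZooKeeper settings into the standard HBase configuration keys.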
+    public static Configuration getConfiguration(JobConf jobConf) {
+        Configuration conf = new Configuration(jobConf);
+        String quorum = conf.get(PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM);
+        if(quorum!=null) {
+            conf.set(HConstants.ZOOKEEPER_QUORUM, quorum);
+        }
+        int zooKeeperClientPort = conf.getInt(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT, 0);
+        if(zooKeeperClientPort != 0) {
+            conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zooKeeperClientPort);
+        }
+        String zNodeParent = conf.get(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT);
+        if(zNodeParent != null) {
+            conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zNodeParent);
+        }
+        return conf;
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java
new file mode 100644
index 0000000..99caee8
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.util;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.lang.reflect.Array;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.math.BigDecimal;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.ConcurrentHashMap;
+import javax.naming.NamingException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.util.Strings;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.ql.io.AcidOutputFormat.Options;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.net.DNS;
+import org.apache.phoenix.hive.PrimaryKeyData;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.hive.ql.index.IndexSearchCondition;
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+
+/**
+ * Miscellaneous utility methods for PhoenixStorageHandler.
+ */
+
+public class PhoenixStorageHandlerUtil {
+    private static final Log LOG = LogFactory.getLog(PhoenixStorageHandlerUtil.class);
+    private static final AtomicReference<Method> GET_BUCKET_METHOD_REF = new AtomicReference<>();
+    private static final AtomicReference<Method> GET_BUCKET_ID_METHOD_REF = new AtomicReference<>();
+
+    public static String getTargetTableName(Table table) {
+        Map<String, String> tableParameterMap = table.getParameters();
+        String tableName = tableParameterMap.get(PhoenixStorageHandlerConstants
+                .PHOENIX_TABLE_NAME);
+        if (tableName == null) {
+            tableName = table.getTableName();
+            tableParameterMap.put(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME, tableName);
+        }
+
+        return tableName;
+    }
+
+
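+    // Converts string constants from Hive search conditions into typed Java values for Phoenix.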
+    public static Object[] toTypedValues(JobConf jobConf, String typeName, String[] values) throws
+            Exception {
+        Object[] results = new Object[values.length];
+        DateFormat df = null;
+
+        for (int i = 0, limit = values.length; i < limit; i++) {
+            if (serdeConstants.STRING_TYPE_NAME.equals(typeName) ||
+                    typeName.startsWith(serdeConstants.CHAR_TYPE_NAME) ||
+                    typeName.startsWith(serdeConstants.VARCHAR_TYPE_NAME)) {
+                results[i] = values[i];
+            } else if (serdeConstants.INT_TYPE_NAME.equals(typeName)) {
+                results[i] = Integer.valueOf(values[i]);
+            } else if (serdeConstants.BIGINT_TYPE_NAME.equals(typeName)) {
+                results[i] = Long.valueOf(values[i]);
+            } else if (serdeConstants.DOUBLE_TYPE_NAME.equals(typeName)) {
+                results[i] = Double.valueOf(values[i]);
+            } else if (serdeConstants.FLOAT_TYPE_NAME.equals(typeName)) {
+                results[i] = Float.valueOf(values[i]);
+            } else if (serdeConstants.SMALLINT_TYPE_NAME.equals(typeName)) {
+                results[i] = Short.valueOf(values[i]);
+            } else if (serdeConstants.TINYINT_TYPE_NAME.equals(typeName)) {
+                results[i] = Byte.valueOf(values[i]);
+            } else if (serdeConstants.DATE_TYPE_NAME.equals(typeName)) {
+                String dateFormat = jobConf.get(PhoenixStorageHandlerConstants.HBASE_DATE_FORMAT,
+                        PhoenixStorageHandlerConstants.DEFAULT_DATE_FORMAT);
+                df = new SimpleDateFormat(dateFormat);
+                results[i] = Long.valueOf(df.parse(values[i]).getTime());
+            } else if (serdeConstants.TIMESTAMP_TYPE_NAME.equals(typeName)) {
+                String timestampFormat = jobConf.get(PhoenixStorageHandlerConstants
+                        .HBASE_TIMESTAMP_FORMAT, PhoenixStorageHandlerConstants
+                        .DEFAULT_TIMESTAMP_FORMAT);
+                df = new SimpleDateFormat(timestampFormat);
+                results[i] = Long.valueOf(df.parse(values[i]).getTime());
+            } else if (typeName.contains(serdeConstants.DECIMAL_TYPE_NAME)) {
+                results[i] = new BigDecimal(values[i]);
+            }
+        }
+
+        return results;
+    }
+
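+    // Extracts the constant operand(s) of a search condition as strings, keyed off the Hive comparison operator.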
+    public static String[] getConstantValues(IndexSearchCondition condition, String comparisonOp) {
+        String[] constantValues = null;
+
+        if (comparisonOp.endsWith("UDFOPEqual") || comparisonOp.endsWith("UDFOPNotEqual")) {
+            constantValues = new String[]{String.valueOf(condition.getConstantDesc().getValue())};
+        } else if (comparisonOp.endsWith("UDFOPEqualOrGreaterThan")) {    // key >= 1
+            constantValues = new String[]{String.valueOf(condition.getConstantDesc().getValue())};
+        } else if (comparisonOp.endsWith("UDFOPGreaterThan")) {        // key > 1
+            constantValues = new String[]{String.valueOf(condition.getConstantDesc().getValue())};
+        } else if (comparisonOp.endsWith("UDFOPEqualOrLessThan")) {    // key <= 1
+            constantValues = new String[]{String.valueOf(condition.getConstantDesc().getValue())};
+        } else if (comparisonOp.endsWith("UDFOPLessThan")) {    // key < 1
+            constantValues = new String[]{String.valueOf(condition.getConstantDesc().getValue())};
+        } else if (comparisonOp.endsWith("GenericUDFBetween")) {
+            constantValues = new String[]{String.valueOf(condition.getConstantDesc(0).getValue()),
+                    String.valueOf(condition.getConstantDesc(1).getValue())};
+        } else if (comparisonOp.endsWith("GenericUDFIn")) {
+            ExprNodeConstantDesc[] constantDescs = condition.getConstantDescs();
+            constantValues = new String[constantDescs.length];
+            for (int i = 0, limit = constantDescs.length; i < limit; i++) {
+                constantValues[i] = String.valueOf(condition.getConstantDesc(i).getValue());
+            }
+        }
+
+        return constantValues;
+    }
+
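+    /**
+     * Resolves the host name of the region server owning the given region, preferring a cached
+     * reverse-DNS lookup and falling back to the host name reported by HBase.
+     */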
+    public static String getRegionLocation(HRegionLocation location, Log log) throws IOException {
+        InetSocketAddress isa = new InetSocketAddress(location.getHostname(), location.getPort());
+        if (isa.isUnresolved()) {
+            log.warn("Failed resolve " + isa);
+        }
+        InetAddress regionAddress = isa.getAddress();
+        String regionLocation = null;
+        try {
+            regionLocation = reverseDNS(regionAddress);
+        } catch (NamingException e) {
+            log.warn("Cannot resolve the host name for " + regionAddress + " because of " + e);
+            regionLocation = location.getHostname();
+        }
+
+        return regionLocation;
+    }
+
+    // Copied from org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.reverseDNS
+    private static final Map<InetAddress, String> reverseDNSCacheMap = new ConcurrentHashMap<>();
+
+    private static String reverseDNS(InetAddress ipAddress) throws NamingException,
+            UnknownHostException {
+        String hostName = reverseDNSCacheMap.get(ipAddress);
+
+        if (hostName == null) {
+            String ipAddressString = null;
+            try {
+                ipAddressString = DNS.reverseDns(ipAddress, null);
+            } catch (Exception e) {
+                // We can use InetAddress in case the jndi failed to pull up the reverse DNS entry
+                // from the name service. Also, in case of ipv6, we need to use the InetAddress
+                // since resolving reverse DNS using jndi doesn't work well with ipv6 addresses.
+                ipAddressString = InetAddress.getByName(ipAddress.getHostAddress()).getHostName();
+            }
+
+            if (ipAddressString == null) {
+                throw new UnknownHostException("No host found for " + ipAddress);
+            }
+
+            hostName = Strings.domainNamePointerToHostName(ipAddressString);
+            reverseDNSCacheMap.put(ipAddress, hostName);
+        }
+
+        return hostName;
+    }
+
+    public static String getTableKeyOfSession(JobConf jobConf, String tableName) {
+
+        String sessionId = jobConf.get(PhoenixConfigurationUtil.SESSION_ID);
+        return new StringBuilder("[").append(sessionId).append("]-").append(tableName).toString();
+    }
+
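+    /**
+     * Builds a column-name to Hive TypeInfo map from the column metadata Hive stores in the job
+     * configuration (serdeConstants.LIST_COLUMNS / LIST_COLUMN_TYPES).
+     */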
+    public static Map<String, TypeInfo> createColumnTypeMap(JobConf jobConf) {
+        Map<String, TypeInfo> columnTypeMap = new HashMap<>();
+
+        String[] columnNames = jobConf.get(serdeConstants.LIST_COLUMNS).split
+                (PhoenixStorageHandlerConstants.COMMA);
+        List<TypeInfo> typeInfos =
+                TypeInfoUtils.getTypeInfosFromTypeString(jobConf.get(serdeConstants.LIST_COLUMN_TYPES));
+
+        for (int i = 0, limit = columnNames.length; i < limit; i++) {
+            columnTypeMap.put(columnNames[i], typeInfos.get(i));
+        }
+
+        return columnTypeMap;
+    }
+
+    public static List<String> getReadColumnNames(Configuration conf) {
+        String colNames = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR);
+        if (colNames != null && !colNames.isEmpty()) {
+            return Arrays.asList(colNames.split(PhoenixStorageHandlerConstants.COMMA));
+        }
+        return Collections.emptyList();
+    }
+
+    public static boolean isTransactionalTable(Properties tableProperties) {
+        String tableIsTransactional = tableProperties.getProperty(hive_metastoreConstants
+                .TABLE_IS_TRANSACTIONAL);
+
+        return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
+    }
+
+    public static boolean isTransactionalTable(Configuration config) {
+        String tableIsTransactional = config.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+
+        return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
+    }
+
+    public static void printConfiguration(Configuration config) {
+        if (Boolean.getBoolean("dev")) {
+            for (Iterator<Entry<String, String>> iterator = config.iterator(); iterator.hasNext();
+                    ) {
+                Entry<String, String> entry = iterator.next();
+
+                System.out.println(entry.getKey() + "=" + entry.getValue());
+            }
+        }
+    }
+
+    public static String toString(Object obj) {
+        String content = null;
+
+        if (obj instanceof Array) {
+            Object[] values = (Object[]) obj;
+
+            content = String.join(PhoenixStorageHandlerConstants.COMMA, (String[]) values);
+        } else {
+            content = obj.toString();
+        }
+
+        return content;
+    }
+
+    public static Map<?, ?> toMap(byte[] serialized) {
+        ByteArrayInputStream bais = new ByteArrayInputStream(serialized);
+
+        try {
+            return PrimaryKeyData.deserialize(bais).getData();
+        } catch (ClassNotFoundException | IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public static String getOptionsValue(Options options) {
+        StringBuilder content = new StringBuilder();
+
+        int bucket = getBucket(options);
+        String inspectorInfo = options.getInspector().getCategory() + ":" + options.getInspector()
+                .getTypeName();
+        long maxTxnId = options.getMaximumWriteId();
+        long minTxnId = options.getMinimumWriteId();
+        int recordIdColumn = options.getRecordIdColumn();
+        boolean isCompressed = options.isCompressed();
+        boolean isWritingBase = options.isWritingBase();
+
+        content.append("bucket : ").append(bucket)
+                .append(", inspectorInfo : ").append(inspectorInfo)
+                .append(", minTxnId : ").append(minTxnId)
+                .append(", maxTxnId : ").append(maxTxnId)
+                .append(", recordIdColumn : ").append(recordIdColumn);
+        content.append(", isCompressed : ").append(isCompressed)
+                .append(", isWritingBase : ").append(isWritingBase);
+
+        return content.toString();
+    }
+
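+    /**
+     * Reads the bucket id reflectively: Options.getBucket() is tried first, with a fallback to
+     * Options.getBucketId(), and the resolved Method is cached. The reflection is needed because
+     * the accessor name is not the same across Hive releases.
+     */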
+    private static int getBucket(Options options) {
+        Method getBucketMethod = GET_BUCKET_METHOD_REF.get();
+        try {
+            if (getBucketMethod == null) {
+                getBucketMethod = Options.class.getMethod("getBucket");
+                GET_BUCKET_METHOD_REF.set(getBucketMethod);
+            }
+            return (int) getBucketMethod.invoke(options);
+        } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException e) {
+            LOG.trace("Failed to invoke Options.getBucket()", e);
+        }
+        Method getBucketIdMethod = GET_BUCKET_ID_METHOD_REF.get();
+        try {
+            if (getBucketIdMethod == null) {
+                getBucketIdMethod = Options.class.getMethod("getBucketId");
+                GET_BUCKET_ID_METHOD_REF.set(getBucketIdMethod);
+            }
+            return (int) getBucketIdMethod.invoke(options);
+        } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException e) {
+            throw new RuntimeException("Failed to invoke Options.getBucketId()", e);
+        }
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/PhoenixUtil.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/PhoenixUtil.java
new file mode 100644
index 0000000..b450371
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/PhoenixUtil.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
+import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.schema.MetaDataClient;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.ColumnInfo;
+import org.apache.phoenix.util.PhoenixRuntime;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Misc utils
+ */
+public class PhoenixUtil {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixUtil.class);
+
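+    /**
+     * Maps a Hive type name to the corresponding Phoenix SQL type name; array types are mapped
+     * recursively and unrecognized names are passed through unchanged.
+     */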
+    public static String getPhoenixType(String hiveTypeName) {
+        if (hiveTypeName.startsWith("array")) {
+            List<String> tokenList = new ArrayList<>(Arrays.asList(hiveTypeName.split("[<>]")));
+            return getPhoenixType(tokenList.get(1)) + "[]";
+        } else if (hiveTypeName.startsWith("int")) {
+            return "integer";
+        } else if (hiveTypeName.equals("string")) {
+            return "varchar";
+        } else {
+            return hiveTypeName;
+        }
+    }
+
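+    /**
+     * Checks whether the Phoenix table exists by querying the JDBC DatabaseMetaData for the
+     * (schema, table) pair parsed from the given name.
+     */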
+    public static boolean existTable(Connection conn, String tableName) throws SQLException {
+        boolean exist = false;
+        DatabaseMetaData dbMeta = conn.getMetaData();
+
+        String[] schemaInfo = getTableSchema(tableName.toUpperCase());
+        try (ResultSet rs = dbMeta.getTables(null, schemaInfo[0], schemaInfo[1], null)) {
+            exist = rs.next();
+
+            if (LOG.isDebugEnabled()) {
+                if (exist) {
+                    LOG.debug(rs.getString("TABLE_NAME") + " table exists.");
+                } else {
+                    LOG.debug("Table " + tableName + " doesn't exist.");
+                }
+            }
+        }
+
+        return exist;
+    }
+
+    public static List<String> getPrimaryKeyColumnList(Connection conn, String tableName) throws
+            SQLException {
+        Map<Short, String> primaryKeyColumnInfoMap = new HashMap<>();
+        DatabaseMetaData dbMeta = conn.getMetaData();
+
+        String[] schemaInfo = getTableSchema(tableName.toUpperCase());
+        try (ResultSet rs = dbMeta.getPrimaryKeys(null, schemaInfo[0], schemaInfo[1])) {
+            while (rs.next()) {
+                primaryKeyColumnInfoMap.put(rs.getShort("KEY_SEQ"), rs.getString("COLUMN_NAME"));
+            }
+
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("PK-columns : " + primaryKeyColumnInfoMap);
+            }
+        }
+
+        return new ArrayList<>(primaryKeyColumnInfoMap.values());
+    }
+
+    public static List<String> getPrimaryKeyColumnList(Configuration config, String tableName) {
+        List<String> pkColumnNameList = null;
+
+        try (Connection conn = PhoenixConnectionUtil.getInputConnection(config, new Properties())) {
+            pkColumnNameList = getPrimaryKeyColumnList(conn, tableName);
+        } catch (SQLException e) {
+            throw new RuntimeException(e);
+        }
+
+        return pkColumnNameList;
+    }
+
+    public static void createTable(Connection conn, String createTableStatement) throws
+            SQLException {
+        conn.createStatement().execute(createTableStatement);
+    }
+
+    public static void dropTable(Connection conn, String tableName) throws SQLException {
+        conn.createStatement().execute("drop table " + tableName);
+    }
+
+    public static List<ColumnInfo> getColumnInfoList(Connection conn, String tableName) throws
+            SQLException {
+        List<ColumnInfo> columnInfoList = null;
+
+        try {
+            columnInfoList = PhoenixRuntime.generateColumnInfo(conn, tableName, null);
+        } catch (TableNotFoundException e) {
+            // This exception can occur while the table is still being created.
+            columnInfoList = Collections.emptyList();
+        }
+
+        return columnInfoList;
+    }
+
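+    /**
+     * Splits a possibly schema-qualified table name into a two-element array of
+     * {schema, table}; the schema element stays null for an unqualified name.
+     */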
+    public static String[] getTableSchema(String tableName) {
+        String[] schemaInfo = new String[2];
+        String[] tokens = tableName.split("\\.");
+
+        if (tokens.length == 2) {
+            schemaInfo = tokens;
+        } else {
+            schemaInfo[1] = tokens[0];
+        }
+
+        return schemaInfo;
+    }
+
+    public static boolean isDisabledWal(MetaDataClient metaDataClient, String tableName) throws
+            SQLException {
+        String[] schemaInfo = getTableSchema(tableName.toUpperCase());
+        MetaDataMutationResult result = metaDataClient.updateCache(schemaInfo[0], schemaInfo[1]);
+        PTable dataTable = result.getTable();
+
+        return dataTable.isWALDisabled();
+    }
+
+    public static void alterTableForWalDisable(Connection conn, String tableName, boolean
+            disableMode) throws SQLException {
+        conn.createStatement().execute("alter table " + tableName + " set disable_wal=" +
+                disableMode);
+    }
+
+    public static void flush(Connection conn, String tableName) throws SQLException {
+        try (Admin admin = ((PhoenixConnection) conn).getQueryServices().getAdmin()) {
+            admin.flush(TableName.valueOf(tableName));
+        } catch (IOException e) {
+            throw new SQLException(e);
+        }
+    }
+
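+    /**
+     * Builds a parameterized DELETE statement whose WHERE clause binds every primary-key column
+     * of the given table with a '?' placeholder.
+     */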
+    public static String constructDeleteStatement(Connection conn, String tableName) throws
+            SQLException {
+        StringBuilder deleteQuery = new StringBuilder("delete from ").append(tableName).append(" " +
+                "where ");
+
+        List<String> primaryKeyColumnList = getPrimaryKeyColumnList(conn, tableName);
+        for (int i = 0, limit = primaryKeyColumnList.size(); i < limit; i++) {
+            String pkColumn = primaryKeyColumnList.get(i);
+            deleteQuery.append(pkColumn).append(PhoenixStorageHandlerConstants.EQUAL).append
+                    (PhoenixStorageHandlerConstants.QUESTION);
+
+            if ((i + 1) != primaryKeyColumnList.size()) {
+                deleteQuery.append(" and ");
+            }
+        }
+
+        return deleteQuery.toString();
+    }
+
+    public static void closeResource(Statement stmt) throws SQLException {
+        if (stmt != null && !stmt.isClosed()) {
+            stmt.close();
+        }
+    }
+
+    public static void closeResource(Connection conn) throws SQLException {
+        if (conn != null && !conn.isClosed()) {
+            conn.close();
+        }
+    }
+}
diff --git a/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/TypeInfoUtils.java b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/TypeInfoUtils.java
new file mode 100644
index 0000000..07da5b9
--- /dev/null
+++ b/phoenix-hive3/src/main/java/org/apache/phoenix/hive/util/TypeInfoUtils.java
@@ -0,0 +1,953 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.util;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.GenericArrayType;
+import java.lang.reflect.Method;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.common.type.HiveVarchar;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveGrouping;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveTypeEntry;
+import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.BaseCharUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.HiveDecimalUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo;
+
+/**
+ * TypeInfoUtils.
+ *
+ */
+public final class TypeInfoUtils {
+
+  public static List<PrimitiveCategory> numericTypeList = new ArrayList<PrimitiveCategory>();
+  // The ordering of types here is used to determine which numeric types
+  // are common/convertible to one another. Probably better to rely on the
+  // ordering explicitly defined here than to assume that the enum values
+  // that were arbitrarily assigned in PrimitiveCategory work for our purposes.
+  public static EnumMap<PrimitiveCategory, Integer> numericTypes =
+      new EnumMap<PrimitiveCategory, Integer>(PrimitiveCategory.class);
+
+  static {
+    registerNumericType(PrimitiveCategory.BYTE, 1);
+    registerNumericType(PrimitiveCategory.SHORT, 2);
+    registerNumericType(PrimitiveCategory.INT, 3);
+    registerNumericType(PrimitiveCategory.LONG, 4);
+    registerNumericType(PrimitiveCategory.FLOAT, 5);
+    registerNumericType(PrimitiveCategory.DOUBLE, 6);
+    registerNumericType(PrimitiveCategory.DECIMAL, 7);
+    registerNumericType(PrimitiveCategory.STRING, 8);
+  }
+
+  private TypeInfoUtils() {
+    // prevent instantiation
+  }
+
+  /**
+   * Return the extended TypeInfo from a Java type. By extended TypeInfo, we
+   * allow unknownType for java.lang.Object.
+   *
+   * @param t
+   *          The Java type.
+   * @param m
+   *          The method, only used for generating error messages.
+   */
+  private static TypeInfo getExtendedTypeInfoFromJavaType(Type t, Method m) {
+
+    if (t == Object.class) {
+      return TypeInfoFactory.unknownTypeInfo;
+    }
+
+    if (t instanceof ParameterizedType) {
+      ParameterizedType pt = (ParameterizedType) t;
+      // List?
+      if (List.class == (Class<?>) pt.getRawType()
+          || ArrayList.class == (Class<?>) pt.getRawType()) {
+        return TypeInfoFactory.getListTypeInfo(getExtendedTypeInfoFromJavaType(
+            pt.getActualTypeArguments()[0], m));
+      }
+      // Map?
+      if (Map.class == (Class<?>) pt.getRawType()
+          || HashMap.class == (Class<?>) pt.getRawType()) {
+        return TypeInfoFactory.getMapTypeInfo(getExtendedTypeInfoFromJavaType(
+            pt.getActualTypeArguments()[0], m),
+            getExtendedTypeInfoFromJavaType(pt.getActualTypeArguments()[1], m));
+      }
+      // Otherwise convert t to RawType so we will fall into the following if
+      // block.
+      t = pt.getRawType();
+    }
+
+    // Must be a class.
+    if (!(t instanceof Class)) {
+      throw new RuntimeException("Hive does not understand type " + t
+          + " from " + m);
+    }
+    Class<?> c = (Class<?>) t;
+
+    // Java Primitive Type?
+    if (PrimitiveObjectInspectorUtils.isPrimitiveJavaType(c)) {
+      return TypeInfoUtils
+          .getTypeInfoFromObjectInspector(PrimitiveObjectInspectorFactory
+          .getPrimitiveJavaObjectInspector(PrimitiveObjectInspectorUtils
+          .getTypeEntryFromPrimitiveJavaType(c).primitiveCategory));
+    }
+
+    // Java Primitive Class?
+    if (PrimitiveObjectInspectorUtils.isPrimitiveJavaClass(c)) {
+      return TypeInfoUtils
+          .getTypeInfoFromObjectInspector(PrimitiveObjectInspectorFactory
+          .getPrimitiveJavaObjectInspector(PrimitiveObjectInspectorUtils
+          .getTypeEntryFromPrimitiveJavaClass(c).primitiveCategory));
+    }
+
+    // Primitive Writable class?
+    if (PrimitiveObjectInspectorUtils.isPrimitiveWritableClass(c)) {
+      return TypeInfoUtils
+          .getTypeInfoFromObjectInspector(PrimitiveObjectInspectorFactory
+          .getPrimitiveWritableObjectInspector(PrimitiveObjectInspectorUtils
+          .getTypeEntryFromPrimitiveWritableClass(c).primitiveCategory));
+    }
+
+    // Must be a struct
+    Field[] fields = ObjectInspectorUtils.getDeclaredNonStaticFields(c);
+    ArrayList<String> fieldNames = new ArrayList<String>(fields.length);
+    ArrayList<TypeInfo> fieldTypeInfos = new ArrayList<TypeInfo>(fields.length);
+    for (Field field : fields) {
+      fieldNames.add(field.getName());
+      fieldTypeInfos.add(getExtendedTypeInfoFromJavaType(
+          field.getGenericType(), m));
+    }
+    return TypeInfoFactory.getStructTypeInfo(fieldNames, fieldTypeInfos);
+  }
+
+  /**
+   * Returns the array element type, if the Type is an array (Object[]), or
+   * GenericArrayType (Map<String,String>[]). Otherwise return null.
+   */
+  public static Type getArrayElementType(Type t) {
+    if (t instanceof Class && ((Class<?>) t).isArray()) {
+      Class<?> arrayClass = (Class<?>) t;
+      return arrayClass.getComponentType();
+    } else if (t instanceof GenericArrayType) {
+      GenericArrayType arrayType = (GenericArrayType) t;
+      return arrayType.getGenericComponentType();
+    }
+    return null;
+  }
+
+  /**
+   * Get the parameter TypeInfo for a method.
+   *
+   * @param size
+   *          In case the last parameter of Method is an array, we will try to
+   *          return a List<TypeInfo> with the specified size by repeating the
+   *          element of the array at the end. In case the size is smaller than
+   *          the minimum possible number of arguments for the method, null will
+   *          be returned.
+   */
+  public static List<TypeInfo> getParameterTypeInfos(Method m, int size) {
+    Type[] methodParameterTypes = m.getGenericParameterTypes();
+
+    // Whether the method takes variable-length arguments
+    // Whether the method takes an array like Object[],
+    // or String[] etc in the last argument.
+    Type lastParaElementType = TypeInfoUtils
+        .getArrayElementType(methodParameterTypes.length == 0 ? null
+        : methodParameterTypes[methodParameterTypes.length - 1]);
+    boolean isVariableLengthArgument = (lastParaElementType != null);
+
+    List<TypeInfo> typeInfos = null;
+    if (!isVariableLengthArgument) {
+      // Normal case, no variable-length arguments
+      if (size != methodParameterTypes.length) {
+        return null;
+      }
+      typeInfos = new ArrayList<TypeInfo>(methodParameterTypes.length);
+      for (Type methodParameterType : methodParameterTypes) {
+        typeInfos.add(getExtendedTypeInfoFromJavaType(methodParameterType, m));
+      }
+    } else {
+      // Variable-length arguments
+      if (size < methodParameterTypes.length - 1) {
+        return null;
+      }
+      typeInfos = new ArrayList<TypeInfo>(size);
+      for (int i = 0; i < methodParameterTypes.length - 1; i++) {
+        typeInfos.add(getExtendedTypeInfoFromJavaType(methodParameterTypes[i],
+            m));
+      }
+      for (int i = methodParameterTypes.length - 1; i < size; i++) {
+        typeInfos.add(getExtendedTypeInfoFromJavaType(lastParaElementType, m));
+      }
+    }
+    return typeInfos;
+  }
+
+  public static boolean hasParameters(String typeName) {
+    int idx = typeName.indexOf('(');
+    if (idx == -1) {
+      return false;
+    } else {
+      return true;
+    }
+  }
+
+  public static String getBaseName(String typeName) {
+    int idx = typeName.indexOf('(');
+    if (idx == -1) {
+      return typeName;
+    } else {
+      return typeName.substring(0, idx);
+    }
+  }
+
+  /**
+   * returns true if both TypeInfos are of primitive type, and the primitive category matches.
+   * @param ti1
+   * @param ti2
+   * @return
+   */
+  public static boolean doPrimitiveCategoriesMatch(TypeInfo ti1, TypeInfo ti2) {
+    if (ti1.getCategory() == Category.PRIMITIVE && ti2.getCategory() == Category.PRIMITIVE) {
+      if (((PrimitiveTypeInfo)ti1).getPrimitiveCategory()
+          == ((PrimitiveTypeInfo)ti2).getPrimitiveCategory()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Parse a recursive TypeInfo list String. For example, the following inputs
+   * are valid inputs:
+   * "int,string,map<string,int>,list<map<int,list<string>>>,list<struct<a:int,b:string>>"
+   * The separators between TypeInfos can be ",", ":", or ";".
+   *
+   * In order to use this class: TypeInfoParser parser = new
+   * TypeInfoParser("int,string"); ArrayList<TypeInfo> typeInfos =
+   * parser.parseTypeInfos();
+   */
+  private static class TypeInfoParser {
+
+    private static class Token {
+      public int position;
+      public String text;
+      public boolean isType;
+
+      @Override
+      public String toString() {
+        return "" + position + ":" + text;
+      }
+    };
+
+    private static boolean isTypeChar(char c) {
+      return Character.isLetterOrDigit(c) || c == '_' || c == '.' || c == ' ' || c == '$';
+    }
+
+    /**
+     * Tokenize the typeInfoString. The rule is simple: all consecutive
+     * alphadigits and '_', '.' are in one token, and all other characters are
+     * one character per token.
+     *
+     * tokenize("map<int,string>") should return
+     * ["map","<","int",",","string",">"]
+     *
+     * Note that we add '$' in new Calcite return path. As '$' will not appear
+     * in any type in Hive, it is safe to do so.
+     */
+    private static ArrayList<Token> tokenize(String typeInfoString) {
+      ArrayList<Token> tokens = new ArrayList<Token>(0);
+      int begin = 0;
+      int end = 1;
+      while (end <= typeInfoString.length()) {
+        // last character ends a token?
+        if (end == typeInfoString.length()
+            || !isTypeChar(typeInfoString.charAt(end - 1))
+            || !isTypeChar(typeInfoString.charAt(end))) {
+          Token t = new Token();
+          t.position = begin;
+          t.text = typeInfoString.substring(begin, end);
+          t.isType = isTypeChar(typeInfoString.charAt(begin));
+          tokens.add(t);
+          begin = end;
+        }
+        end++;
+      }
+      return tokens;
+    }
+
+    public TypeInfoParser(String typeInfoString) {
+      this.typeInfoString = typeInfoString;
+      typeInfoTokens = tokenize(typeInfoString);
+    }
+
+    private final String typeInfoString;
+    private final ArrayList<Token> typeInfoTokens;
+    private ArrayList<TypeInfo> typeInfos;
+    private int iToken;
+
+    public ArrayList<TypeInfo> parseTypeInfos() {
+      typeInfos = new ArrayList<TypeInfo>();
+      iToken = 0;
+      while (iToken < typeInfoTokens.size()) {
+        typeInfos.add(parseType());
+        if (iToken < typeInfoTokens.size()) {
+          Token separator = typeInfoTokens.get(iToken);
+          if (",".equals(separator.text) || ";".equals(separator.text)
+              || ":".equals(separator.text)) {
+            iToken++;
+          } else {
+            throw new IllegalArgumentException(
+                "Error: ',', ':', or ';' expected at position "
+                + separator.position + " from '" + typeInfoString + "' "
+                + typeInfoTokens);
+          }
+        }
+      }
+      return typeInfos;
+    }
+
+    private Token peek() {
+      if (iToken < typeInfoTokens.size()) {
+        return typeInfoTokens.get(iToken);
+      } else {
+        return null;
+      }
+    }
+
+    private Token expect(String item) {
+      return expect(item, null);
+    }
+
+    private Token expect(String item, String alternative) {
+      if (iToken >= typeInfoTokens.size()) {
+        throw new IllegalArgumentException("Error: " + item
+            + " expected at the end of '" + typeInfoString + "'");
+      }
+      Token t = typeInfoTokens.get(iToken);
+      if (item.equals("type")) {
+        if (!serdeConstants.LIST_TYPE_NAME.equals(t.text)
+            && !serdeConstants.MAP_TYPE_NAME.equals(t.text)
+            && !serdeConstants.STRUCT_TYPE_NAME.equals(t.text)
+            && !serdeConstants.UNION_TYPE_NAME.equals(t.text)
+            && null == PrimitiveObjectInspectorUtils
+            .getTypeEntryFromTypeName(t.text)
+            && !t.text.equals(alternative)) {
+          throw new IllegalArgumentException("Error: " + item
+              + " expected at the position " + t.position + " of '"
+              + typeInfoString + "' but '" + t.text + "' is found.");
+        }
+      } else if (item.equals("name")) {
+        if (!t.isType && !t.text.equals(alternative)) {
+          throw new IllegalArgumentException("Error: " + item
+              + " expected at the position " + t.position + " of '"
+              + typeInfoString + "' but '" + t.text + "' is found.");
+        }
+      } else {
+        if (!item.equals(t.text) && !t.text.equals(alternative)) {
+          throw new IllegalArgumentException("Error: " + item
+              + " expected at the position " + t.position + " of '"
+              + typeInfoString + "' but '" + t.text + "' is found.");
+        }
+      }
+      iToken++;
+      return t;
+    }
+
+    private String[] parseParams() {
+      List<String> params = new LinkedList<String>();
+
+      Token t = peek();
+      if (t != null && t.text.equals("(")) {
+        expect("(");
+
+        // checking for null in the for-loop condition prevents null-ptr exception
+        // and allows us to fail more gracefully with a parsing error.
+        for(t = peek(); (t == null) || !t.text.equals(")"); t = expect(",",")")) {
+          params.add(expect("name").text);
+        }
+        if (params.size() == 0) {
+          throw new IllegalArgumentException(
+              "type parameters expected for type string " + typeInfoString);
+        }
+      }
+
+      return params.toArray(new String[params.size()]);
+    }
+
+    private TypeInfo parseType() {
+
+      Token t = expect("type");
+
+      // Is this a primitive type?
+      PrimitiveTypeEntry typeEntry =
+          PrimitiveObjectInspectorUtils.getTypeEntryFromTypeName(t.text);
+      if (typeEntry != null && typeEntry.primitiveCategory != PrimitiveCategory.UNKNOWN ) {
+        String[] params = parseParams();
+        switch (typeEntry.primitiveCategory) {
+        case CHAR:
+        case VARCHAR:
+          if (params == null || params.length == 0) {
+            throw new IllegalArgumentException(typeEntry.typeName
+                + " type is specified without length: " + typeInfoString);
+          }
+
+          int length = 1;
+          if (params.length == 1) {
+            length = Integer.parseInt(params[0]);
+            if (typeEntry.primitiveCategory == PrimitiveCategory.VARCHAR) {
+              BaseCharUtils.validateVarcharParameter(length);
+              return TypeInfoFactory.getVarcharTypeInfo(length);
+            } else {
+              BaseCharUtils.validateCharParameter(length);
+              return TypeInfoFactory.getCharTypeInfo(length);
+            }
+          } else if (params.length > 1) {
+            throw new IllegalArgumentException(
+                "Type " + typeEntry.typeName+ " only takes one parameter, but " +
+                params.length + " is seen");
+          }
+        case DECIMAL:
+          int precision = HiveDecimal.USER_DEFAULT_PRECISION;
+          int scale = HiveDecimal.USER_DEFAULT_SCALE;
+          if (params == null || params.length == 0) {
+            // It's possible that old metadata still refers to "decimal" as a column type w/o
+            // precision/scale. In this case, the default (10,0) is assumed. Thus, do nothing here.
+          } else if (params.length == 2) {
+            // New metadata always have two parameters.
+            precision = Integer.parseInt(params[0]);
+            scale = Integer.parseInt(params[1]);
+            HiveDecimalUtils.validateParameter(precision, scale);
+          } else if (params.length > 2) {
+            throw new IllegalArgumentException("Type decimal only takes two parameter, but " +
+                params.length + " is seen");
+          }
+
+          return TypeInfoFactory.getDecimalTypeInfo(precision, scale);
+        default:
+          return TypeInfoFactory.getPrimitiveTypeInfo(typeEntry.typeName);
+        }
+      }
+
+      // Is this a list type?
+      if (serdeConstants.LIST_TYPE_NAME.equals(t.text)) {
+        expect("<");
+        TypeInfo listElementType = parseType();
+        expect(">");
+        return TypeInfoFactory.getListTypeInfo(listElementType);
+      }
+
+      // Is this a map type?
+      if (serdeConstants.MAP_TYPE_NAME.equals(t.text)) {
+        expect("<");
+        TypeInfo mapKeyType = parseType();
+        expect(",");
+        TypeInfo mapValueType = parseType();
+        expect(">");
+        return TypeInfoFactory.getMapTypeInfo(mapKeyType, mapValueType);
+      }
+
+      // Is this a struct type?
+      if (serdeConstants.STRUCT_TYPE_NAME.equals(t.text)) {
+        ArrayList<String> fieldNames = new ArrayList<String>();
+        ArrayList<TypeInfo> fieldTypeInfos = new ArrayList<TypeInfo>();
+        boolean first = true;
+        do {
+          if (first) {
+            expect("<");
+            first = false;
+          } else {
+            Token separator = expect(">", ",");
+            if (separator.text.equals(">")) {
+              // end of struct
+              break;
+            }
+          }
+          Token name = expect("name",">");
+          if (name.text.equals(">")) {
+            break;
+          }
+          fieldNames.add(name.text);
+          expect(":");
+          fieldTypeInfos.add(parseType());
+        } while (true);
+
+        return TypeInfoFactory.getStructTypeInfo(fieldNames, fieldTypeInfos);
+      }
+      // Is this a union type?
+      if (serdeConstants.UNION_TYPE_NAME.equals(t.text)) {
+        List<TypeInfo> objectTypeInfos = new ArrayList<TypeInfo>();
+        boolean first = true;
+        do {
+          if (first) {
+            expect("<");
+            first = false;
+          } else {
+            Token separator = expect(">", ",");
+            if (separator.text.equals(">")) {
+              // end of union
+              break;
+            }
+          }
+          objectTypeInfos.add(parseType());
+        } while (true);
+
+        return TypeInfoFactory.getUnionTypeInfo(objectTypeInfos);
+      }
+
+      throw new RuntimeException("Internal error parsing position "
+          + t.position + " of '" + typeInfoString + "'");
+    }
+
+    public PrimitiveParts parsePrimitiveParts() {
+      PrimitiveParts parts = new PrimitiveParts();
+      Token t = expect("type");
+      parts.typeName = t.text;
+      parts.typeParams = parseParams();
+      return parts;
+    }
+  }
+
+  public static class PrimitiveParts {
+    public String  typeName;
+    public String[] typeParams;
+  }
+
+  /**
+   * Make some of the TypeInfo parsing available as a utility.
+   */
+  public static PrimitiveParts parsePrimitiveParts(String typeInfoString) {
+    TypeInfoParser parser = new TypeInfoParser(typeInfoString);
+    return parser.parsePrimitiveParts();
+  }
+
+  static ConcurrentHashMap<TypeInfo, ObjectInspector> cachedStandardObjectInspector =
+      new ConcurrentHashMap<TypeInfo, ObjectInspector>();
+
+  /**
+   * Returns the standard object inspector that can be used to translate an
+   * object of that typeInfo to a standard object type.
+   */
+  public static ObjectInspector getStandardWritableObjectInspectorFromTypeInfo(
+      TypeInfo typeInfo) {
+    ObjectInspector result = cachedStandardObjectInspector.get(typeInfo);
+    if (result == null) {
+      switch (typeInfo.getCategory()) {
+      case PRIMITIVE: {
+        result = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
+            (PrimitiveTypeInfo) typeInfo);
+        break;
+      }
+      case LIST: {
+        ObjectInspector elementObjectInspector =
+            getStandardWritableObjectInspectorFromTypeInfo(((ListTypeInfo) typeInfo)
+            .getListElementTypeInfo());
+        result = ObjectInspectorFactory
+            .getStandardListObjectInspector(elementObjectInspector);
+        break;
+      }
+      case MAP: {
+        MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
+        ObjectInspector keyObjectInspector =
+            getStandardWritableObjectInspectorFromTypeInfo(mapTypeInfo.getMapKeyTypeInfo());
+        ObjectInspector valueObjectInspector =
+            getStandardWritableObjectInspectorFromTypeInfo(mapTypeInfo.getMapValueTypeInfo());
+        result = ObjectInspectorFactory.getStandardMapObjectInspector(
+            keyObjectInspector, valueObjectInspector);
+        break;
+      }
+      case STRUCT: {
+        StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
+        List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
+        List<TypeInfo> fieldTypeInfos = structTypeInfo
+            .getAllStructFieldTypeInfos();
+        List<ObjectInspector> fieldObjectInspectors = new ArrayList<ObjectInspector>(
+            fieldTypeInfos.size());
+        for (int i = 0; i < fieldTypeInfos.size(); i++) {
+          fieldObjectInspectors
+              .add(getStandardWritableObjectInspectorFromTypeInfo(fieldTypeInfos
+              .get(i)));
+        }
+        result = ObjectInspectorFactory.getStandardStructObjectInspector(
+            fieldNames, fieldObjectInspectors);
+        break;
+      }
+      case UNION: {
+        UnionTypeInfo unionTypeInfo = (UnionTypeInfo) typeInfo;
+        List<TypeInfo> objectTypeInfos = unionTypeInfo
+            .getAllUnionObjectTypeInfos();
+        List<ObjectInspector> fieldObjectInspectors =
+          new ArrayList<ObjectInspector>(objectTypeInfos.size());
+        for (int i = 0; i < objectTypeInfos.size(); i++) {
+          fieldObjectInspectors
+              .add(getStandardWritableObjectInspectorFromTypeInfo(objectTypeInfos
+              .get(i)));
+        }
+        result = ObjectInspectorFactory.getStandardUnionObjectInspector(
+            fieldObjectInspectors);
+        break;
+      }
+
+      default: {
+        result = null;
+      }
+      }
+      ObjectInspector prev =
+        cachedStandardObjectInspector.putIfAbsent(typeInfo, result);
+      if (prev != null) {
+        result = prev;
+      }
+    }
+    return result;
+  }
+
+  static ConcurrentHashMap<TypeInfo, ObjectInspector> cachedStandardJavaObjectInspector =
+      new ConcurrentHashMap<TypeInfo, ObjectInspector>();
+
+  /**
+   * Returns the standard object inspector that can be used to translate an
+   * object of that typeInfo to a standard object type.
+   */
+  public static ObjectInspector getStandardJavaObjectInspectorFromTypeInfo(
+      TypeInfo typeInfo) {
+    ObjectInspector result = cachedStandardJavaObjectInspector.get(typeInfo);
+    if (result == null) {
+      switch (typeInfo.getCategory()) {
+      case PRIMITIVE: {
+        // NOTE: we use JavaPrimitiveObjectInspector instead of
+        // StandardPrimitiveObjectInspector
+        result = PrimitiveObjectInspectorFactory
+            .getPrimitiveJavaObjectInspector((PrimitiveTypeInfo) typeInfo);
+        break;
+      }
+      case LIST: {
+        ObjectInspector elementObjectInspector =
+            getStandardJavaObjectInspectorFromTypeInfo(((ListTypeInfo) typeInfo)
+            .getListElementTypeInfo());
+        result = ObjectInspectorFactory
+            .getStandardListObjectInspector(elementObjectInspector);
+        break;
+      }
+      case MAP: {
+        MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
+        ObjectInspector keyObjectInspector = getStandardJavaObjectInspectorFromTypeInfo(mapTypeInfo
+            .getMapKeyTypeInfo());
+        ObjectInspector valueObjectInspector =
+            getStandardJavaObjectInspectorFromTypeInfo(mapTypeInfo.getMapValueTypeInfo());
+        result = ObjectInspectorFactory.getStandardMapObjectInspector(
+            keyObjectInspector, valueObjectInspector);
+        break;
+      }
+      case STRUCT: {
+        StructTypeInfo strucTypeInfo = (StructTypeInfo) typeInfo;
+        List<String> fieldNames = strucTypeInfo.getAllStructFieldNames();
+        List<TypeInfo> fieldTypeInfos = strucTypeInfo
+            .getAllStructFieldTypeInfos();
+        List<ObjectInspector> fieldObjectInspectors = new ArrayList<ObjectInspector>(
+            fieldTypeInfos.size());
+        for (int i = 0; i < fieldTypeInfos.size(); i++) {
+          fieldObjectInspectors
+              .add(getStandardJavaObjectInspectorFromTypeInfo(fieldTypeInfos
+              .get(i)));
+        }
+        result = ObjectInspectorFactory.getStandardStructObjectInspector(
+            fieldNames, fieldObjectInspectors);
+        break;
+      }
+      case UNION: {
+        UnionTypeInfo unionTypeInfo = (UnionTypeInfo) typeInfo;
+        List<TypeInfo> objectTypeInfos = unionTypeInfo
+            .getAllUnionObjectTypeInfos();
+        List<ObjectInspector> fieldObjectInspectors =
+          new ArrayList<ObjectInspector>(objectTypeInfos.size());
+        for (int i = 0; i < objectTypeInfos.size(); i++) {
+          fieldObjectInspectors
+              .add(getStandardJavaObjectInspectorFromTypeInfo(objectTypeInfos
+              .get(i)));
+        }
+        result = ObjectInspectorFactory.getStandardUnionObjectInspector(
+            fieldObjectInspectors);
+        break;
+      }
+      default: {
+        result = null;
+      }
+      }
+      ObjectInspector prev =
+        cachedStandardJavaObjectInspector.putIfAbsent(typeInfo, result);
+      if (prev != null) {
+        result = prev;
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Get the TypeInfo object from the ObjectInspector object by recursively
+   * going into the ObjectInspector structure.
+   */
+  public static TypeInfo getTypeInfoFromObjectInspector(ObjectInspector oi) {
+    // OPTIMIZATION for later.
+    // if (oi instanceof TypeInfoBasedObjectInspector) {
+    // TypeInfoBasedObjectInspector typeInfoBasedObjectInspector =
+    // (ObjectInspector)oi;
+    // return typeInfoBasedObjectInspector.getTypeInfo();
+    // }
+    if (oi == null) {
+      return null;
+    }
+
+    // Recursively going into ObjectInspector structure
+    TypeInfo result = null;
+    switch (oi.getCategory()) {
+    case PRIMITIVE: {
+      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
+      result = poi.getTypeInfo();
+      break;
+    }
+    case LIST: {
+      ListObjectInspector loi = (ListObjectInspector) oi;
+      result = TypeInfoFactory
+          .getListTypeInfo(getTypeInfoFromObjectInspector(loi
+          .getListElementObjectInspector()));
+      break;
+    }
+    case MAP: {
+      MapObjectInspector moi = (MapObjectInspector) oi;
+      result = TypeInfoFactory.getMapTypeInfo(
+          getTypeInfoFromObjectInspector(moi.getMapKeyObjectInspector()),
+          getTypeInfoFromObjectInspector(moi.getMapValueObjectInspector()));
+      break;
+    }
+    case STRUCT: {
+      StructObjectInspector soi = (StructObjectInspector) oi;
+      List<? extends StructField> fields = soi.getAllStructFieldRefs();
+      List<String> fieldNames = new ArrayList<String>(fields.size());
+      List<TypeInfo> fieldTypeInfos = new ArrayList<TypeInfo>(fields.size());
+      for (StructField f : fields) {
+        fieldNames.add(f.getFieldName());
+        fieldTypeInfos.add(getTypeInfoFromObjectInspector(f
+            .getFieldObjectInspector()));
+      }
+      result = TypeInfoFactory.getStructTypeInfo(fieldNames, fieldTypeInfos);
+      break;
+    }
+    case UNION: {
+      UnionObjectInspector uoi = (UnionObjectInspector) oi;
+      List<TypeInfo> objectTypeInfos = new ArrayList<TypeInfo>();
+      for (ObjectInspector eoi : uoi.getObjectInspectors()) {
+        objectTypeInfos.add(getTypeInfoFromObjectInspector(eoi));
+      }
+      result = TypeInfoFactory.getUnionTypeInfo(objectTypeInfos);
+      break;
+    }
+    default: {
+      throw new RuntimeException("Unknown ObjectInspector category!");
+    }
+    }
+    return result;
+  }
+
+  public static ArrayList<TypeInfo> typeInfosFromStructObjectInspector(
+      StructObjectInspector structObjectInspector) {
+
+    List<? extends StructField> fields = structObjectInspector.getAllStructFieldRefs();
+    ArrayList<TypeInfo> typeInfoList = new ArrayList<TypeInfo>(fields.size());
+
+    for(StructField field : fields) {
+      TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(
+          field.getFieldObjectInspector().getTypeName());
+      typeInfoList.add(typeInfo);
+    }
+    return typeInfoList;
+  }
+
+  public static ArrayList<TypeInfo> typeInfosFromTypeNames(List<String> typeNames) {
+
+    ArrayList<TypeInfo> result = new ArrayList<TypeInfo>(typeNames.size());
+
+    for(int i = 0; i < typeNames.size(); i++) {
+      TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeNames.get(i));
+      result.add(typeInfo);
+    }
+    return result;
+  }
+
+  public static ArrayList<TypeInfo> getTypeInfosFromTypeString(String typeString) {
+    TypeInfoParser parser = new TypeInfoParser(typeString);
+    return parser.parseTypeInfos();
+  }
+
+  public static List<String> getTypeStringsFromTypeInfo(List<TypeInfo> typeInfos) {
+    if (typeInfos == null) {
+      return null;
+    }
+
+    List<String> result = new ArrayList<>(typeInfos.size());
+    for (TypeInfo typeInfo : typeInfos) {
+      result.add(typeInfo.toString());
+    }
+    return result;
+  }
+
+  public static TypeInfo getTypeInfoFromTypeString(String typeString) {
+    TypeInfoParser parser = new TypeInfoParser(typeString);
+    return parser.parseTypeInfos().get(0);
+  }
+
+  /**
+   * Given two types, determine whether conversion needs to occur to compare the two types.
+   * This is needed for cases like varchar, where the TypeInfo for varchar(10) != varchar(5),
+   * but there would be no need to have to convert to compare these values.
+   * @param typeA
+   * @param typeB
+   * @return
+   */
+  public static boolean isConversionRequiredForComparison(TypeInfo typeA, TypeInfo typeB) {
+    if (typeA.equals(typeB)) {
+      return false;
+    }
+
+    if (TypeInfoUtils.doPrimitiveCategoriesMatch(typeA, typeB)) {
+      return false;
+    }
+
+    return true;
+  }
+
+  /**
+   * Return the character length of the type
+   * @param typeInfo
+   * @return
+   */
+  public static int getCharacterLengthForType(PrimitiveTypeInfo typeInfo) {
+    switch (typeInfo.getPrimitiveCategory()) {
+      case STRING:
+        return HiveVarchar.MAX_VARCHAR_LENGTH;
+      case CHAR:
+      case VARCHAR:
+        BaseCharTypeInfo baseCharTypeInfo = (BaseCharTypeInfo) typeInfo;
+        return baseCharTypeInfo.getLength();
+      default:
+        return 0;
+    }
+  }
+
+  public static void registerNumericType(PrimitiveCategory primitiveCategory, int level) {
+    numericTypeList.add(primitiveCategory);
+    numericTypes.put(primitiveCategory, level);
+  }
+
+  public static boolean implicitConvertible(PrimitiveCategory from, PrimitiveCategory to) {
+    if (from == to) {
+      return true;
+    }
+
+    PrimitiveGrouping fromPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(from);
+    PrimitiveGrouping toPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(to);
+
+    // Allow implicit String to Double conversion
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DOUBLE) {
+      return true;
+    }
+    // Allow implicit String to Decimal conversion
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DECIMAL) {
+      return true;
+    }
+    // Void can be converted to any type
+    if (from == PrimitiveCategory.VOID) {
+      return true;
+    }
+
+    // Allow implicit String to Date conversion
+    if (fromPg == PrimitiveGrouping.DATE_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+    // Allow implicit Numeric to String conversion
+    if (fromPg == PrimitiveGrouping.NUMERIC_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+    // Allow implicit String to varchar conversion, and vice versa
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+
+    // Allow implicit conversion from Byte -> Integer -> Long -> Float -> Double
+    // Decimal -> String
+    Integer f = numericTypes.get(from);
+    Integer t = numericTypes.get(to);
+    if (f == null || t == null) {
+      return false;
+    }
+    if (f.intValue() > t.intValue()) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Returns whether it is possible to implicitly convert an object of Class
+   * from to Class to.
+   */
+  public static boolean implicitConvertible(TypeInfo from, TypeInfo to) {
+    if (from.equals(to)) {
+      return true;
+    }
+
+    // Reimplemented to use PrimitiveCategory rather than TypeInfo, because
+    // 2 TypeInfos from the same qualified type (varchar, decimal) should still be
+    // seen as equivalent.
+    if (from.getCategory() == Category.PRIMITIVE && to.getCategory() == Category.PRIMITIVE) {
+      return implicitConvertible(
+          ((PrimitiveTypeInfo) from).getPrimitiveCategory(),
+          ((PrimitiveTypeInfo) to).getPrimitiveCategory());
+    }
+    return false;
+  }
+}
diff --git a/phoenix-hive3/src/test/java/org/apache/phoenix/hive/PrimaryKeyDataTest.java b/phoenix-hive3/src/test/java/org/apache/phoenix/hive/PrimaryKeyDataTest.java
new file mode 100644
index 0000000..3b2634f
--- /dev/null
+++ b/phoenix-hive3/src/test/java/org/apache/phoenix/hive/PrimaryKeyDataTest.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InvalidClassException;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.util.HashMap;
+
+import org.junit.Test;
+
+public class PrimaryKeyDataTest {
+    private static class Disallowed implements Serializable {
+        private static final long serialVersionUID = 1L;
+    }
+
+    private byte[] serialize(Object o) throws IOException {
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
+            oos.writeObject(o);
+        }
+        return baos.toByteArray();
+    }
+
+    @Test
+    public void testSerde() throws Exception {
+        HashMap<String,Object> data = new HashMap<>();
+        data.put("one", 1);
+        data.put("two", "two");
+        data.put("three", 3);
+
+        PrimaryKeyData pkData = new PrimaryKeyData(data);
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        pkData.serialize(baos);
+
+        PrimaryKeyData pkCopy = PrimaryKeyData.deserialize(new ByteArrayInputStream(baos.toByteArray()));
+        assertEquals(data, pkCopy.getData());
+    }
+
+    @Test
+    public void testDisallowedDeserialization() throws Exception {
+        byte[] serializedMap = serialize(new HashMap<String,Object>());
+        byte[] serializedClass = serialize(new Disallowed());
+        byte[] serializedString = serialize("asdf");
+
+        try {
+            PrimaryKeyData.deserialize(new ByteArrayInputStream(serializedMap));
+            fail("Expected an InvalidClassException");
+        } catch (InvalidClassException e) {}
+        try {
+            PrimaryKeyData.deserialize(new ByteArrayInputStream(serializedClass));
+            fail("Expected an InvalidClassException");
+        } catch (InvalidClassException e) {}
+        try {
+            PrimaryKeyData.deserialize(new ByteArrayInputStream(serializedString));
+            fail("Expected an InvalidClassException");
+        } catch (InvalidClassException e) {}
+    }
+}
diff --git a/phoenix-hive3/src/test/java/org/apache/phoenix/hive/query/PhoenixQueryBuilderTest.java b/phoenix-hive3/src/test/java/org/apache/phoenix/hive/query/PhoenixQueryBuilderTest.java
new file mode 100644
index 0000000..e27347e
--- /dev/null
+++ b/phoenix-hive3/src/test/java/org/apache/phoenix/hive/query/PhoenixQueryBuilderTest.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hive.query;
+
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.phoenix.hive.ql.index.IndexSearchCondition;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.junit.Assert.assertEquals;
+
+public class PhoenixQueryBuilderTest {
+    private static final PhoenixQueryBuilder BUILDER = PhoenixQueryBuilder.getInstance();
+    private static final String TABLE_NAME = "TEST_TABLE";
+
+    private IndexSearchCondition mockedIndexSearchCondition(String comparisonOp,
+                                                            Object constantValue,
+                                                            Object[] constantValues,
+                                                            String columnName,
+                                                            String typeString,
+                                                            boolean isNot) {
+        IndexSearchCondition condition = mock(IndexSearchCondition.class);
+        when(condition.getComparisonOp()).thenReturn(comparisonOp);
+
+        if (constantValue != null) {
+            ExprNodeConstantDesc constantDesc = mock(ExprNodeConstantDesc.class);
+            when(constantDesc.getValue()).thenReturn(constantValue);
+            when(condition.getConstantDesc()).thenReturn(constantDesc);
+        }
+
+        ExprNodeColumnDesc columnDesc = mock(ExprNodeColumnDesc.class);
+        when(columnDesc.getColumn()).thenReturn(columnName);
+        when(columnDesc.getTypeString()).thenReturn(typeString);
+        when(condition.getColumnDesc()).thenReturn(columnDesc);
+
+
+        if (ArrayUtils.isNotEmpty(constantValues)) {
+            ExprNodeConstantDesc[] constantDescs = new ExprNodeConstantDesc[constantValues.length];
+            for (int i = 0; i < constantDescs.length; i++) {
+                constantDescs[i] = mock(ExprNodeConstantDesc.class);
+                when(condition.getConstantDesc(i)).thenReturn(constantDescs[i]);
+                when(constantDescs[i].getValue()).thenReturn(constantValues[i]);
+            }
+            when(condition.getConstantDescs()).thenReturn(constantDescs);
+        }
+
+        when(condition.isNot()).thenReturn(isNot);
+
+        return condition;
+    }
+
+    @Test
+    public void testBuildQueryWithCharColumns() throws IOException {
+        final String COLUMN_CHAR = "Column_Char";
+        final String COLUMN_VARCHAR = "Column_VChar";
+        final String expectedQueryPrefix = "select /*+ NO_CACHE  */ \"" + COLUMN_CHAR + "\",\"" + COLUMN_VARCHAR +
+                "\" from TEST_TABLE where ";
+
+        JobConf jobConf = new JobConf();
+        List<String> readColumnList = new ArrayList<>(Arrays.asList(COLUMN_CHAR, COLUMN_VARCHAR));
+        List<IndexSearchCondition> searchConditions = new ArrayList<>(Arrays.asList(
+                mockedIndexSearchCondition("GenericUDFOPEqual", "CHAR_VALUE", null, COLUMN_CHAR, "char(10)", false),
+                mockedIndexSearchCondition("GenericUDFOPEqual", "CHAR_VALUE2", null, COLUMN_VARCHAR, "varchar(10)", false)
+        ));
+
+        assertEquals(expectedQueryPrefix + "\"Column_Char\" = 'CHAR_VALUE' and \"Column_VChar\" = 'CHAR_VALUE2'",
+                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
+
+        searchConditions = new ArrayList<>(Arrays.asList(
+                mockedIndexSearchCondition("GenericUDFIn", null,
+                        new Object[]{"CHAR1", "CHAR2", "CHAR3"}, COLUMN_CHAR, "char(10)", false))
+        );
+
+        assertEquals(expectedQueryPrefix + "\"Column_Char\" in ('CHAR1', 'CHAR2', 'CHAR3')",
+                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
+
+        searchConditions = new ArrayList<>(Arrays.asList(
+                mockedIndexSearchCondition("GenericUDFIn", null,
+                        new Object[]{"CHAR1", "CHAR2", "CHAR3"}, COLUMN_CHAR, "char(10)", true))
+        );
+
+        assertEquals(expectedQueryPrefix + "\"Column_Char\" not in ('CHAR1', 'CHAR2', 'CHAR3')",
+                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
+
+        searchConditions = new ArrayList<>(Arrays.asList(
+                mockedIndexSearchCondition("GenericUDFBetween", null,
+                        new Object[]{"CHAR1", "CHAR2"}, COLUMN_CHAR, "char(10)", false))
+        );
+
+        assertEquals(expectedQueryPrefix + "\"Column_Char\" between 'CHAR1' and 'CHAR2'",
+                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
+
+        searchConditions = new ArrayList<>(Arrays.asList(
+                mockedIndexSearchCondition("GenericUDFBetween", null,
+                        new Object[]{"CHAR1", "CHAR2"}, COLUMN_CHAR, "char(10)", true))
+        );
+
+        assertEquals(expectedQueryPrefix + "\"Column_Char\" not between 'CHAR1' and 'CHAR2'",
+                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
+    }
+
+    @Test
+    public void testBuildBetweenQueryWithDateColumns() throws IOException {
+        final String COLUMN_DATE = "Column_Date";
+        final String tableName = "TEST_TABLE";
+        final String expectedQueryPrefix = "select /*+ NO_CACHE  */ \"" + COLUMN_DATE +
+                "\" from " + tableName + " where ";
+
+        JobConf jobConf = new JobConf();
+        List<String> readColumnList = new ArrayList<>(Arrays.asList(COLUMN_DATE));
+
+        List<IndexSearchCondition> searchConditions = new ArrayList<>(Arrays.asList(
+                mockedIndexSearchCondition("GenericUDFBetween", null,
+                        new Object[]{"1992-01-02", "1992-02-02"}, COLUMN_DATE, "date", false)
+        ));
+
+        assertEquals(expectedQueryPrefix +
+                        "\"" + COLUMN_DATE + "\" between to_date('1992-01-02') and to_date('1992-02-02')",
+                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
+
+        searchConditions = new ArrayList<>(Arrays.asList(
+                mockedIndexSearchCondition("GenericUDFBetween", null,
+                        new Object[]{"1992-01-02", "1992-02-02"}, COLUMN_DATE, "date", true)
+        ));
+
+        assertEquals(expectedQueryPrefix +
+                        "\"" + COLUMN_DATE + "\" not between to_date('1992-01-02') and to_date('1992-02-02')",
+                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
+    }
+
+    @Test
+    public void testBuildQueryWithNotNull() throws IOException {
+        final String COLUMN_DATE = "Column_Date";
+        final String tableName = "TEST_TABLE";
+        final String expectedQueryPrefix = "select /*+ NO_CACHE  */ \"" + COLUMN_DATE +
+                "\" from " + tableName + " where ";
+
+        JobConf jobConf = new JobConf();
+        List<String> readColumnList = new ArrayList<>(Arrays.asList(COLUMN_DATE));
+
+        List<IndexSearchCondition> searchConditions = new ArrayList<>(Arrays.asList(
+                mockedIndexSearchCondition("GenericUDFOPNotNull", null,
+                        null, COLUMN_DATE, "date", true))
+        );
+
+        assertEquals(expectedQueryPrefix +
+                        "\"" + COLUMN_DATE + "\" is not null ",
+                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
+    }
+
+    @Test
+    public void testBuildQueryWithBigintColumns() throws IOException {
+        final String COLUMN_BIGINT = "Column_Bigint";
+        final String tableName = "TEST_TABLE";
+        final String expectedQueryPrefix = "select /*+ NO_CACHE  */ \"" + COLUMN_BIGINT +
+          "\" from " + tableName + " where ";
+
+        JobConf jobConf = new JobConf();
+        List<String> readColumnList = new ArrayList<>(Arrays.asList(COLUMN_BIGINT));
+
+        List<IndexSearchCondition> searchConditions = new ArrayList<>(Arrays.asList(
+          mockedIndexSearchCondition("GenericUDFOPEqual", 100L,
+            null, COLUMN_BIGINT, "bigint", false))
+        );
+
+        assertEquals(expectedQueryPrefix + "\"" + COLUMN_BIGINT + "\" = 100",
+          BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
+    }
+}
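The operator cases in testBuildQueryWithCharColumns all follow the same build-conditions-then-assert cycle. If further operators are covered later, a small helper along these lines keeps each case to a single call; this is a sketch that reuses only members the test class already defines (BUILDER, TABLE_NAME, mockedIndexSearchCondition) and makes no new assumptions about the builder's output format, since the expected WHERE clause is passed in verbatim.

    // Sketch of a reusable assertion helper for PhoenixQueryBuilderTest.
    private void assertWhereClause(JobConf jobConf, List<String> readColumnList,
            String expectedQueryPrefix, String expectedWhereClause,
            IndexSearchCondition... conditions) throws IOException {
        List<IndexSearchCondition> searchConditions = new ArrayList<>(Arrays.asList(conditions));
        assertEquals(expectedQueryPrefix + expectedWhereClause,
                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
    }

    // Equivalent to the first assertion in testBuildQueryWithCharColumns:
    // assertWhereClause(jobConf, readColumnList, expectedQueryPrefix,
    //         "\"Column_Char\" = 'CHAR_VALUE' and \"Column_VChar\" = 'CHAR_VALUE2'",
    //         mockedIndexSearchCondition("GenericUDFOPEqual", "CHAR_VALUE", null, COLUMN_CHAR, "char(10)", false),
    //         mockedIndexSearchCondition("GenericUDFOPEqual", "CHAR_VALUE2", null, COLUMN_VARCHAR, "varchar(10)", false));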
diff --git a/phoenix-hive3/src/test/resources/hbase-site.xml b/phoenix-hive3/src/test/resources/hbase-site.xml
new file mode 100644
index 0000000..d185eb7
--- /dev/null
+++ b/phoenix-hive3/src/test/resources/hbase-site.xml
@@ -0,0 +1,10 @@
+<configuration>
+  <property>
+    <name>hbase.wal.provider</name>
+    <value>filesystem</value>
+  </property>
+  <property>
+    <name>hbase.wal.meta_provider</name>
+    <value>filesystem</value>
+  </property>
+</configuration>
\ No newline at end of file
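The test hbase-site.xml pins both WAL providers to the plain filesystem implementation, which is commonly done so the HBase mini-cluster used by the integration tests does not pick the default asyncfs WAL provider. For reference, the same settings can also be applied programmatically before the mini-cluster starts; this is a sketch assuming the standard HBaseTestingUtility API, not something added by this patch.

    // Sketch only: apply the same WAL settings in code before starting a mini-cluster.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterWalConfig {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtility util = new HBaseTestingUtility();
            Configuration conf = util.getConfiguration();
            // Same values as src/test/resources/hbase-site.xml above.
            conf.set("hbase.wal.provider", "filesystem");
            conf.set("hbase.wal.meta_provider", "filesystem");
            util.startMiniCluster();          // WALs use the filesystem-based provider
            try {
                // ... test logic against util.getConnection() ...
            } finally {
                util.shutdownMiniCluster();
            }
        }
    }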
diff --git a/phoenix-hive3/src/test/resources/hive-site.xml b/phoenix-hive3/src/test/resources/hive-site.xml
new file mode 100644
index 0000000..143a829
--- /dev/null
+++ b/phoenix-hive3/src/test/resources/hive-site.xml
@@ -0,0 +1,123 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+<property>
+  <name>hive.in.test</name>
+  <value>true</value>
+  <description>Internal marker for test. Used for masking env-dependent values</description>
+</property>
+
+<property>
+  <name>hive.tez.container.size</name>
+  <value>128</value>
+  <description></description>
+</property>
+
+<property>
+  <name>phoenix.log.buffer.size</name>
+  <value>1024</value>
+  <description></description>
+</property>
+
+
+<property>
+  <name>datanucleus.schema.autoCreateAll</name>
+  <value>true</value>
+</property>
+
+
+<property>
+  <name>hive.metastore.schema.verification</name>
+  <value>false</value>
+</property>
+
+<property>
+  <name>hive.query.results.cache.enabled</name>
+  <value>false</value>
+</property>
+
+<property>
+  <name>hive.fetch.task.conversion</name>
+  <value>minimal</value>
+</property>
+
+<property>
+  <name>hive.auto.convert.join</name>
+  <value>false</value>
+  <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file size</description>
+</property>
+
+<property>
+  <name>hive.ignore.mapjoin.hint</name>
+  <value>false</value>
+  <description>Whether Hive ignores the mapjoin hint</description>
+</property>
+
+
+<property>
+  <name>hive.exec.mode.local.auto</name>
+  <value>false</value>
+  <description>
+    Let Hive determine whether to run in local mode automatically.
+    Disabled for tests so that minimr is not affected.
+  </description>
+</property>
+
+
+<!-- MetaStore settings -->
+
+
+<property>
+  <name>javax.jdo.option.ConnectionURL</name>
+  <value>jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db;create=true</value>
+</property>
+
+<property>
+  <name>javax.jdo.option.ConnectionDriverName</name>
+  <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+</property>
+
+<property>
+  <name>javax.jdo.option.ConnectionUserName</name>
+  <value>APP</value>
+</property>
+
+<property>
+  <name>javax.jdo.option.ConnectionPassword</name>
+  <value>mine</value>
+</property>
+
+<property>
+  <!--  this should eventually be deprecated since the metastore should supply this -->
+  <name>hive.metastore.warehouse.dir</name>
+  <value>${test.warehouse.dir}</value>
+  <description></description>
+</property>
+
+<property>
+  <name>hive.metastore.metadb.dir</name>
+  <value>file://${test.tmp.dir}/metadb/</value>
+  <description>
+  Required by the metastore server, or when hive.metastore.uris is not supplied
+  </description>
+</property>
+
+</configuration>
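The metastore configured above is an embedded, in-memory Derby database; ${test.tmp.dir} is expected to be supplied by the build, so every test run gets a fresh, throwaway metastore. The connection URL is the standard embedded-Derby in-memory form; the sketch below just demonstrates that URL shape with plain JDBC and an illustrative database name, and is not part of this patch.

    // Sketch: the same style of in-memory Derby URL, opened directly over JDBC.
    // Requires the Derby embedded driver on the classpath (it self-registers via JDBC 4).
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class InMemoryDerbyExample {
        public static void main(String[] args) throws Exception {
            // In hive-site.xml the database name is additionally prefixed with ${test.tmp.dir}.
            String url = "jdbc:derby:memory:junit_metastore_db;create=true";
            try (Connection conn = DriverManager.getConnection(url);
                 Statement stmt = conn.createStatement()) {
                stmt.execute("VALUES 1");     // trivial query to prove the database is up
            }
        }
    }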
diff --git a/phoenix-hive3/src/test/resources/log4j.properties b/phoenix-hive3/src/test/resources/log4j.properties
new file mode 100644
index 0000000..41fe21e
--- /dev/null
+++ b/phoenix-hive3/src/test/resources/log4j.properties
@@ -0,0 +1,38 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Root logger configuration for the tests.
+log4j.rootLogger=INFO,console
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d %-5p %C(%L): %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=ERROR
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+log4j.logger.org.apache.phoenix.hive=DEBUG
+log4j.logger.org.apache.phoenix.hive.query=DEBUG
+log4j.logger.org.apache.phoenix.hive.objectinspector=DEBUG
diff --git a/phoenix-hive3/src/test/resources/tez-site.xml b/phoenix-hive3/src/test/resources/tez-site.xml
new file mode 100644
index 0000000..97ae8c5
--- /dev/null
+++ b/phoenix-hive3/src/test/resources/tez-site.xml
@@ -0,0 +1,69 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+<!-- Site specific YARN configuration properties -->
+<property>
+    <name>tez.am.resource.memory.mb</name>
+    <value>500</value>
+</property>
+
+<property>
+    <name>tez.am.task.memory.mb</name>
+    <value>500</value>
+</property>
+
+<property>
+    <name>hive.tez.container.size</name>
+    <value>500</value>
+</property>
+
+
+<property>
+    <name>hive.in.tez.test</name>
+    <value>true</value>
+</property>
+
+<property>
+    <name>tez.ignore.lib.uris</name>
+    <value>true</value>
+</property>
+
+
+<property>
+  <name>hive.tez.input.format</name>
+  <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+  <description>The default input format for tez. Tez groups splits in the AM.</description>
+</property>
+
+
+<property>
+  <name>hive.input.format</name>
+  <value>org.apache.hadoop.hive.ql.io.CombineHiveInputFormat</value>
+  <description>The default input format. If it is not specified, the system assigns one: HiveInputFormat for hadoop versions 17, 18 and 19, and CombineHiveInputFormat for hadoop 20. The user can always override it; if there is a bug in CombineHiveInputFormat, it can be set back to HiveInputFormat manually.</description>
+</property>
+
+<property>
+  <name>hive.auto.convert.join</name>
+  <value>false</value>
+  <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file size</description>
+</property>
+
+<property>
+  <name>hive.ignore.mapjoin.hint</name>
+  <value>true</value>
+  <description>Whether Hive ignores the mapjoin hint</description>
+</property>
+
+
+</configuration>
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index 8757e4f..214d65b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -28,6 +28,7 @@
     <module>phoenix-pig</module>
     <module>phoenix-spark</module>
     <module>phoenix-hive</module>
+    <module>phoenix-hive3</module>
   </modules>
 
   <repositories>
@@ -60,10 +61,11 @@
     <test.output.tofile>true</test.output.tofile>
     <top.dir>${project.basedir}</top.dir>
 
+    <jdk.version>1.7</jdk.version>
 
     <!-- Dependency versions -->
     <hive.version>1.2.1</hive.version>
-    <hadoop.version>2.7.1</hadoop.version>
+    <hadoop.version>2.7.5</hadoop.version>
     <pig.version>0.13.0</pig.version>
     <log4j.version>1.2.17</log4j.version>
     <disruptor.version>3.3.6</disruptor.version>
@@ -97,7 +99,7 @@
     <!-- Set default encoding so multi-byte tests work correctly on the Mac -->
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
-    <curator.version>2.12.0</curator.version>  
+    <curator.version>2.12.0</curator.version>
 
   </properties>
 
@@ -106,11 +108,31 @@
       <plugins>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-toolchains-plugin</artifactId>
+          <version>1.1</version>
+          <executions>
+            <execution>
+              <goals>
+                <goal>toolchain</goal>
+              </goals>
+            </execution>
+          </executions>
+          <configuration>
+            <toolchains>
+              <jdk>
+                <version>${jdk.version}</version>
+              </jdk>
+            </toolchains>
+          </configuration>
+        </plugin>
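With the toolchains plugin bound like this, the build resolves a JDK matching ${jdk.version} from the builder's toolchains.xml instead of simply using whatever JDK launched Maven, and the toolchain goal fails fast when no matching entry exists. A minimal sketch of a matching ~/.m2/toolchains.xml entry follows; the jdkHome path is illustrative and must point at a locally installed JDK.

    <!-- Sketch of a toolchains.xml entry satisfying the <jdk><version>${jdk.version}</version></jdk> requirement above -->
    <toolchains>
      <toolchain>
        <type>jdk</type>
        <provides>
          <version>1.7</version>
        </provides>
        <configuration>
          <jdkHome>/usr/lib/jvm/java-7-openjdk</jdkHome>  <!-- illustrative path -->
        </configuration>
      </toolchain>
    </toolchains>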
+
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-compiler-plugin</artifactId>
           <version>3.0</version>
           <configuration>
-            <source>1.7</source>
-            <target>1.7</target>
+            <source>${jdk.version}</source>
+            <target>${jdk.version}</target>
           </configuration>
         </plugin>
         <!--This plugin's configuration is used to store Eclipse m2e settings 
@@ -587,7 +609,7 @@
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-common</artifactId>
-        <version>${hadoop-two.version}</version>
+        <version>${hadoop.version}</version>
         <exclusions>
           <exclusion>
             <groupId>org.xerial.snappy</groupId>
@@ -598,58 +620,64 @@
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-annotations</artifactId>
-        <version>${hadoop-two.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-client-core</artifactId>
-        <version>${hadoop-two.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-minicluster</artifactId>
-        <version>${hadoop-two.version}</version>
+        <version>${hadoop.version}</version>
         <optional>true</optional>
         <scope>test</scope>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-client-minicluster</artifactId>
+        <version>${hadoop.version}</version>
+        <scope>test</scope>
+      </dependency>
 
       <!-- Required for mini-cluster since hbase built against old version of hadoop -->
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-auth</artifactId>
-        <version>${hadoop-two.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-client-common</artifactId>
-        <version>${hadoop-two.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-        <version>${hadoop-two.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-client</artifactId>
-        <version>${hadoop-two.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdfs</artifactId>
-        <version>${hadoop-two.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdfs</artifactId>
-        <version>${hadoop-two.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type> <!-- this does not work which is typical for maven.-->
         <scope>test</scope>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-minikdc</artifactId>
-        <version>${hadoop-two.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <!-- General Dependencies -->