DRILL-8279: Use thick Phoenix driver
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0c7b24a..185a113 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -29,7 +29,7 @@
   build:
     name: Main Build
     runs-on: ubuntu-latest
-    timeout-minutes: 100
+    timeout-minutes: 120
     strategy:
       matrix:
         # Java versions to run unit tests
diff --git a/contrib/storage-hive/core/pom.xml b/contrib/storage-hive/core/pom.xml
index 2bcaf13..795cb49 100644
--- a/contrib/storage-hive/core/pom.xml
+++ b/contrib/storage-hive/core/pom.xml
@@ -76,6 +76,14 @@
           <groupId>io.airlift</groupId>
           <artifactId>aircompressor</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>co.cask.tephra</groupId>
+          <artifactId>tephra-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>co.cask.tephra</groupId>
+          <artifactId>tephra-core</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
diff --git a/contrib/storage-phoenix/README.md b/contrib/storage-phoenix/README.md
index fa8467f..139a60c 100644
--- a/contrib/storage-phoenix/README.md
+++ b/contrib/storage-phoenix/README.md
@@ -22,7 +22,6 @@
  2. PHOENIX-6582: Bump default HBase version to 2.3.7 and 2.4.8
  3. PHOENIX-6605, PHOENIX-6606 and PHOENIX-6607.
  4. DRILL-8060, DRILL-8061 and DRILL-8062.
- 5. [QueryServer 6.0.0-drill-r1](https://github.com/luocooong/phoenix-queryserver/releases/tag/6.0.0-drill-r1)
 
 ## Configuration
 
@@ -33,8 +32,9 @@
 ```json
 {
   "type": "phoenix",
-  "host": "the.queryserver.hostname",
-  "port": 8765,
+  "zkQuorum": "zk.quorum.hostnames",
+  "port": 2181,
+  "zkPath": "/hbase",
   "enabled": true
 }
 ```
@@ -44,7 +44,7 @@
 ```json
 {
   "type": "phoenix",
-  "jdbcURL": "jdbc:phoenix:thin:url=http://the.queryserver.hostname:8765;serialization=PROTOBUF",
+  "jdbcURL": "jdbc:phoenix:zk.quorum.hostname1,zk.quorum.hostname2,zk.quorum.hostname3:2181:/hbase",
   "enabled": true
 }
 ```
@@ -54,10 +54,11 @@
 ```json
 {
   "type": "phoenix",
-  "host": "the.queryserver.hostname",
-  "port": 8765,
+  "zkQuorum": "zk.quorum.hostnames",
+  "port": 2181,
   "props": {
-    "phoenix.query.timeoutMs": 60000
+    "hbase.client.retries.number": 10,
+    "hbase.client.pause": 10000
   },
   "enabled": true
 }
@@ -65,13 +66,12 @@
 
 Tips :
- * More connection properties, see also [PQS Configuration](http://phoenix.apache.org/server.html).
+ * For more connection properties, see [Phoenix Tuning](http://phoenix.apache.org/tuning.html).
- * If you provide the `jdbcURL`, the connection will ignore the value of `host` and `port`.
- * If you [extended the authentication of QueryServer](https://github.com/Boostport/avatica/issues/28), you can also pass the `userName` and `password`.
+ * If you provide the `jdbcURL`, the connection will ignore the values of `zkQuorum`, `port`, and `zkPath`.
 
 ```json
 {
   "type": "phoenix",
-  "host": "the.queryserver.hostname",
+  "zkQuorum": "zk.quorum.hostnames",
   "port": 8765,
   "userName": "my_user",
   "password": "my_pass",
@@ -83,11 +83,11 @@
 Configurations :
 1. Enable [Drill User Impersonation](https://drill.apache.org/docs/configuring-user-impersonation/)
 2. Enable [PQS Impersonation](https://phoenix.apache.org/server.html#Impersonation)
-3. PQS URL:
-  1. Provide `host` and `port` and Drill will generate the PQS URL with a doAs parameter of current session user
-  2. Provide the `jdbcURL` with a `doAs` url param and `$user` placeholder as a value, for instance:
-     `jdbc:phoenix:thin:url=http://localhost:8765?doAs=$user`. In case Drill Impersonation is enabled, but `doAs=$user`
-     is missing the User Exception is thrown.
+3. Phoenix URL:
+  1. Provide `zkQuorum` and `port`, and Drill will create the connection to Phoenix, impersonating (`doAs`) the
+     current session user
+  2. Provide the `jdbcURL` with a `principal` and keytab, for instance:
+     `jdbc:phoenix:<ZK-QUORUM>:<ZK-PORT>:<ZK-HBASE-NODE>:principal_name@REALM:/path/to/keytab`.
 
 ## Testing
 
@@ -99,27 +99,6 @@
 requires a recompilation of HBase because of incompatible changes between Hadoop2 and Hadoop3. "
 ```
 
-### Recommended Practices
-
- 1. Download HBase 2.4.2 sources and rebuild with Hadoop 3.
-
-    ```mvn clean install -DskipTests -Dhadoop.profile=3.0 -Dhadoop-three.version=3.2.3```
-
- 2. Remove the `Ignore` annotation in `PhoenixTestSuite.java`.
-    
-  ```
-    @Ignore
-    @Category({ SlowTest.class })
-    public class PhoenixTestSuite extends ClusterTest {
-   ```
-    
- 3. Go to the phoenix root folder and run test.
-
-  ```
-  cd contrib/storage-phoenix/
-  mvn test
-  ```
-
 ### To Add Features
 
  - Don't forget to add a test function to the test class.
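
For illustration, a minimal connectivity check against the thick-driver URL format documented above. This is a sketch, not part of the patch: the quorum hostnames are placeholders, and the catalog probe mirrors `PhoenixBasicsIT.testCatalogs` added later in this change.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class ThickDriverSmokeCheck {
  public static void main(String[] args) throws Exception {
    // Same driver class the plugin now loads (PhoenixStoragePluginConfig.FAT_DRIVER_CLASS)
    Class.forName("org.apache.phoenix.jdbc.PhoenixDriver");
    // zkQuorum:port:zkPath, as in the README examples; hostnames are placeholders
    String url = "jdbc:phoenix:zk.quorum.hostname1,zk.quorum.hostname2:2181:/hbase";
    try (Connection conn = DriverManager.getConnection(url);
         ResultSet catalogs = conn.getMetaData().getCatalogs()) {
      System.out.println("connected, isClosed=" + conn.isClosed());
    }
  }
}
```
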
diff --git a/contrib/storage-phoenix/pom.xml b/contrib/storage-phoenix/pom.xml
index d47b60a..4fe3829 100644
--- a/contrib/storage-phoenix/pom.xml
+++ b/contrib/storage-phoenix/pom.xml
@@ -29,12 +29,12 @@
   <name>Drill : Contrib : Storage : Phoenix</name>
   
   <properties>
-    <phoenix.queryserver.version>6.0.0</phoenix.queryserver.version>
     <phoenix.version>5.1.2</phoenix.version>
     <!-- Keep the 2.4.2 to reduce dependency conflict -->
     <hbase.minicluster.version>2.4.2</hbase.minicluster.version>
+    <phoenix.skip.tests>false</phoenix.skip.tests>
   </properties>
-  
+
   <dependencies>
     <dependency>
       <groupId>org.apache.drill.exec</groupId>
@@ -55,60 +55,6 @@
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
-	<dependency>
-	  <groupId>org.apache.phoenix</groupId>
-	  <artifactId>phoenix-queryserver-client</artifactId>
-	  <version>${phoenix.queryserver.version}</version>
-	</dependency>
- 
-    <!-- Test Dependency versions -->
-    <dependency>
-      <!-- PHOENIX-6605 -->
-      <groupId>com.github.luocooong.phoenix-queryserver</groupId>
-      <artifactId>phoenix-queryserver</artifactId>
-      <version>6.0.0-drill-r1</version>
-      <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>*</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>commons-logging</groupId>
-          <artifactId>commons-logging</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>*</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.ow2.asm</groupId>
-          <artifactId>asm</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <!-- PHOENIX-6605 -->
-      <groupId>com.github.luocooong.phoenix-queryserver</groupId>
-      <artifactId>phoenix-queryserver-it</artifactId>
-      <version>6.0.0-drill-r1</version>
-      <scope>test</scope>
-      <classifier>tests</classifier>
-      <exclusions>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-jaxrs</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-xc</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
     <dependency>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-core</artifactId>
@@ -136,13 +82,53 @@
           <groupId>org.ow2.asm</groupId>
           <artifactId>asm</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.ow2.asm</groupId>
+          <artifactId>asm-all</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-configuration</groupId>
+          <artifactId>commons-configuration</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-csv</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-endpoint</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.salesforce.i18n</groupId>
+          <artifactId>i18n-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>jline</groupId>
+          <artifactId>jline</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.codahale.metrics</groupId>
+          <artifactId>metrics-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.codahale.metrics</groupId>
+          <artifactId>metrics-graphite</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.phoenix</groupId>
+      <artifactId>phoenix-hbase-compat-2.4.1</artifactId>
+      <version>${phoenix.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-core</artifactId>
       <version>${phoenix.version}</version>
-      <scope>test</scope>
       <exclusions>
         <exclusion>
           <groupId>org.slf4j</groupId>
@@ -152,6 +138,62 @@
           <groupId>log4j</groupId>
           <artifactId>log4j</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.ow2.asm</groupId>
+          <artifactId>asm</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.ow2.asm</groupId>
+          <artifactId>asm-all</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-configuration</groupId>
+          <artifactId>commons-configuration</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-testing-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-csv</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-endpoint</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>jline</groupId>
+          <artifactId>jline</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.salesforce.i18n</groupId>
+          <artifactId>i18n-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.codahale.metrics</groupId>
+          <artifactId>metrics-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.codahale.metrics</groupId>
+          <artifactId>metrics-graphite</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
@@ -170,21 +212,20 @@
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-it</artifactId>
       <version>${hbase.minicluster.version}</version>
-      <type>test-jar</type>
+      <classifier>tests</classifier>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-endpoint</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
       <version>${hbase.minicluster.version}</version>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-server</artifactId>
-      <version>${hbase.minicluster.version}</version>
-      <type>test-jar</type>
+      <classifier>tests</classifier>
       <scope>test</scope>
     </dependency>
     <dependency>
@@ -214,7 +255,7 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-asyncfs</artifactId>
-      <type>test-jar</type>
+      <classifier>tests</classifier>
       <version>${hbase.minicluster.version}</version>
       <scope>test</scope>
       <exclusions>
@@ -288,29 +329,12 @@
           <groupId>com.zaxxer</groupId>
           <artifactId>HikariCP-java7</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-csv</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
-    <dependency>
-      <groupId>com.univocity</groupId>
-      <artifactId>univocity-parsers</artifactId>
-      <version>${univocity-parsers.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-server</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-http</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-servlet</artifactId>
-      <scope>test</scope>
-    </dependency>
   </dependencies>
   <build>
     <plugins>
@@ -339,8 +363,12 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
+          <skipTests>${phoenix.skip.tests}</skipTests>
+          <forkCount combine.self="override">1</forkCount>
+          <reuseForks>false</reuseForks>
           <includes>
             <include>**/PhoenixTestSuite.class</include>
+            <include>**/SecuredPhoenixTestSuite.class</include>
           </includes>
           <excludes>
             <exclude>**/*Test.java</exclude>
@@ -353,6 +381,9 @@
   <profiles>
     <profile>
       <id>hadoop-2</id>
+      <properties>
+        <phoenix.skip.tests>true</phoenix.skip.tests>
+      </properties>
       <build>
         <plugins>
           <plugin>
diff --git a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixDataSource.java b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixDataSource.java
index b89a530..806317f 100644
--- a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixDataSource.java
+++ b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixDataSource.java
@@ -18,40 +18,42 @@
 package org.apache.drill.exec.store.phoenix;
 
 import java.io.PrintWriter;
+import java.security.PrivilegedExceptionAction;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Properties;
+import java.util.function.Supplier;
 import java.util.logging.Logger;
 
 import javax.sql.DataSource;
 
-import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.exceptions.DrillRuntimeException;
+import org.apache.drill.common.util.function.CheckedSupplier;
+import org.apache.drill.exec.util.ImpersonationUtil;
 import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.LoggerFactory;
 
 /**
  * Phoenix’s Connection objects are different from most other JDBC Connections
  * due to the underlying HBase connection. The Phoenix Connection object
  * is designed to be a thin object that is inexpensive to create.
- *
+ * <p>
  * If Phoenix Connections are reused, it is possible that the underlying HBase connection
  * is not always left in a healthy state by the previous user. It is better to
  * create new Phoenix Connections to ensure that you avoid any potential issues.
  */
 public class PhoenixDataSource implements DataSource {
   private static final org.slf4j.Logger logger = LoggerFactory.getLogger(PhoenixDataSource.class);
-
-  private static final String DEFAULT_URL_HEADER = "jdbc:phoenix:thin:url=http://";
-  private static final String DEFAULT_SERIALIZATION = "serialization=PROTOBUF";
-  private static final String IMPERSONATED_USER_VARIABLE = "$user";
-  private static final String DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = "doAs";
+  private static final String DEFAULT_URL_HEADER = "jdbc:phoenix:";
 
   private final String url;
   private final String user;
-  private Map<String, Object> connectionProperties;
-  private boolean isFatClient;
+  private final Map<String, Object> connectionProperties;
+  private final boolean impersonationEnabled;
 
   public PhoenixDataSource(String url,
                            String userName,
@@ -61,29 +63,30 @@
     Preconditions.checkNotNull(connectionProperties);
     connectionProperties.forEach((k, v)
         -> Preconditions.checkArgument(v != null, String.format("does not accept null values : %s", k)));
-    this.url = impersonationEnabled ? doAsUserUrl(url, userName) : url;
+    this.impersonationEnabled = impersonationEnabled;
+    this.url = url;
     this.user = userName;
     this.connectionProperties = connectionProperties;
   }
 
-  public PhoenixDataSource(String host,
+  public PhoenixDataSource(String zkQuorum,
                            int port,
+                           String zkPath,
                            String userName,
                            Map<String, Object> connectionProperties,
                            boolean impersonationEnabled) {
-    Preconditions.checkNotNull(host, userName);
-    Preconditions.checkArgument(port > 0, "Please set the correct port.");
+    Preconditions.checkNotNull(zkQuorum, "zkQuorum must not be null");
     connectionProperties.forEach((k, v)
       -> Preconditions.checkArgument(v != null, String.format("does not accept null values : %s", k)));
-    this.url = new StringBuilder()
+    StringBuilder stringBuilder = new StringBuilder()
       .append(DEFAULT_URL_HEADER)
-      .append(host)
+      .append(zkQuorum)
       .append(":")
-      .append(port)
-      .append(impersonationEnabled ? "?doAs=" + userName : "")
-      .append(";")
-      .append(DEFAULT_SERIALIZATION)
-      .toString();
+      .append(port);
+    Optional.ofNullable(zkPath)
+      .ifPresent(path -> stringBuilder.append(":").append(path));
+    this.url = stringBuilder.toString();
+    this.impersonationEnabled = impersonationEnabled;
     this.user = userName;
     this.connectionProperties = connectionProperties;
   }
@@ -92,10 +95,6 @@
     return connectionProperties;
   }
 
-  public void setConnectionProperties(Map<String, Object> connectionProperties) {
-    this.connectionProperties = connectionProperties;
-  }
-
   @Override
   public PrintWriter getLogWriter() {
     throw new UnsupportedOperationException("getLogWriter");
@@ -138,30 +137,37 @@
 
   @Override
   public Connection getConnection() throws SQLException {
-    useDriverClass();
+    loadDriverClass();
     return getConnection(this.user, null);
   }
 
   @Override
   public Connection getConnection(String userName, String password) throws SQLException {
-    useDriverClass();
+    loadDriverClass();
     logger.debug("Drill/Phoenix connection url: {}", url);
-    return DriverManager.getConnection(url, useConfProperties());
+    CheckedSupplier<Connection, SQLException> action =
+      () -> DriverManager.getConnection(url, useConfProperties());
+    if (impersonationEnabled) {
+      return doAsRemoteUser(userName, action);
+    }
+    return action.getAndThrow();
+  }
+
+  private <T> T doAsRemoteUser(String remoteUserName, final Supplier<T> action) {
+    try {
+      UserGroupInformation proxyUser = ImpersonationUtil.createProxyUgi(remoteUserName);
+      return proxyUser.doAs((PrivilegedExceptionAction<T>) action::get);
+    } catch (Exception e) {
+      throw new DrillRuntimeException(e);
+    }
   }
 
   /**
-   * The thin-client is lightweight and better compatibility.
-   * Only thin-client is currently supported.
-   *
-   * @throws SQLException
+   * Only thick-client is currently supported due to a shaded Avatica conflict created by the thin client.
    */
-  public Class<?> useDriverClass() throws SQLException {
+  private void loadDriverClass() throws SQLException {
     try {
-      if (isFatClient) {
-        return Class.forName(PhoenixStoragePluginConfig.FAT_DRIVER_CLASS);
-      } else {
-        return Class.forName(PhoenixStoragePluginConfig.THIN_DRIVER_CLASS);
-      }
+      Class.forName(PhoenixStoragePluginConfig.FAT_DRIVER_CLASS);
     } catch (ClassNotFoundException e) {
       throw new SQLException("Cause by : " + e.getMessage());
     }
@@ -180,17 +186,7 @@
     props.putIfAbsent("phoenix.trace.frequency", "never");
     props.putIfAbsent("phoenix.query.timeoutMs", 30000);
     props.putIfAbsent("phoenix.query.keepAliveMs", 120000);
+    props.putIfAbsent("phoenix.schema.isNamespaceMappingEnabled", "true");
     return props;
   }
-
-  private String doAsUserUrl(String url, String userName) {
-    if (url.contains(DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM)) {
-      return url.replace(IMPERSONATED_USER_VARIABLE, userName);
-    } else {
-      throw UserException
-        .connectionError()
-        .message("Invalid PQS URL. Please add the value of the `doAs=$user` parameter if Impersonation is enabled.")
-        .build(logger);
-    }
-  }
 }
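
The `doAsRemoteUser` helper above wraps the connection attempt in a Hadoop proxy UGI obtained from Drill's `ImpersonationUtil.createProxyUgi`. A reduced sketch of the same flow using plain Hadoop security APIs (the user name and URL are placeholders):

```java
import java.security.PrivilegedExceptionAction;
import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsConnectionSketch {
  static Connection connectAs(String remoteUser, String url) throws Exception {
    // Proxy UGI: the logged-in (service) user acts on behalf of remoteUser
    UserGroupInformation proxyUgi =
        UserGroupInformation.createProxyUser(remoteUser, UserGroupInformation.getLoginUser());
    // Every HBase/Phoenix RPC issued inside doAs() carries the proxied identity
    return proxyUgi.doAs(
        (PrivilegedExceptionAction<Connection>) () -> DriverManager.getConnection(url));
  }
}
```
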
diff --git a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixReader.java b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixReader.java
index 0ecc4ae..1d70276 100644
--- a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixReader.java
+++ b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixReader.java
@@ -236,7 +236,11 @@
     public void load(ResultSet row) throws SQLException {
       Array array = row.getArray(index);
       if (array != null && array.getArray() != null) {
-        Object[] values = (Object[]) array.getArray();
+        // Phoenix may return an array of primitives, so box each element via reflection
+        Object javaArray = array.getArray();
+        int length = java.lang.reflect.Array.getLength(javaArray);
+        Object[] values = new Object[length];
+        for (int i = 0; i < values.length; i++) {
+          values[i] = java.lang.reflect.Array.get(javaArray, i);
+        }
         ((ScalarArrayWriter) writer).setObjectArray(values);
       }
     }
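
The boxing loop above exists because `java.sql.Array.getArray()` can hand back a primitive array such as `int[]`, which cannot be cast to `Object[]`. A standalone sketch of the conversion:

```java
import java.lang.reflect.Array;
import java.util.Arrays;

public class PrimitiveArrayBoxingSketch {
  // Works for both Object[] and primitive arrays such as int[] or double[]
  static Object[] box(Object anyArray) {
    int length = Array.getLength(anyArray);
    Object[] boxed = new Object[length];
    for (int i = 0; i < length; i++) {
      boxed[i] = Array.get(anyArray, i); // primitives are auto-boxed here
    }
    return boxed;
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(box(new int[] {1, 2, 3}))); // prints [1, 2, 3]
  }
}
```
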
diff --git a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePlugin.java b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePlugin.java
index b46fef2..5420995 100644
--- a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePlugin.java
+++ b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePlugin.java
@@ -121,7 +121,7 @@
     try {
       return CACHE.get(userName);
     } catch (final ExecutionException e) {
-      throw new SQLException("Failure setting up Phoenix DataSource (PQS client)", e);
+      throw new SQLException("Failure setting up Phoenix DataSource (Phoenix client)", e);
     }
   }
 
@@ -135,6 +135,6 @@
     boolean impersonationEnabled = context.getConfig().getBoolean(ExecConstants.IMPERSONATION_ENABLED);
     return StringUtils.isNotBlank(config.getJdbcURL())
-      ? new PhoenixDataSource(config.getJdbcURL(), userName, props, impersonationEnabled) // the props is initiated.
+      ? new PhoenixDataSource(config.getJdbcURL(), userName, props, impersonationEnabled) // props are already initialized
-      : new PhoenixDataSource(config.getHost(), config.getPort(), userName, props, impersonationEnabled);
+      : new PhoenixDataSource(config.getZkQuorum(), config.getPort(), config.getZkPath(), userName, props, impersonationEnabled);
   }
 }
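
For context, `CACHE` above is a per-user Guava `LoadingCache`, which is why a failed lookup surfaces as `ExecutionException`. A hedged sketch of that pattern; the eviction policy and the `createDataSourceFor` helper are illustrative, not the plugin's actual settings:

```java
import java.util.concurrent.TimeUnit;
import javax.sql.DataSource;
import org.apache.drill.shaded.guava.com.google.common.cache.CacheBuilder;
import org.apache.drill.shaded.guava.com.google.common.cache.CacheLoader;
import org.apache.drill.shaded.guava.com.google.common.cache.LoadingCache;

class PerUserDataSourceCacheSketch {
  // One DataSource per user, built lazily; a loader failure is wrapped in
  // ExecutionException by LoadingCache.get(), matching the catch block above
  private final LoadingCache<String, DataSource> cache = CacheBuilder.newBuilder()
      .expireAfterAccess(10, TimeUnit.MINUTES) // assumption: illustrative eviction policy
      .build(new CacheLoader<String, DataSource>() {
        @Override
        public DataSource load(String userName) {
          return createDataSourceFor(userName); // hypothetical helper
        }
      });

  DataSource createDataSourceFor(String userName) {
    throw new UnsupportedOperationException("placeholder for the plugin's createDataSource");
  }

  DataSource getDataSource(String userName) throws Exception {
    return cache.get(userName); // throws ExecutionException on loader failure
  }
}
```
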
diff --git a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePluginConfig.java b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePluginConfig.java
index 8a9e6a8..2bac97b 100644
--- a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePluginConfig.java
+++ b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePluginConfig.java
@@ -38,26 +38,28 @@
 public class PhoenixStoragePluginConfig extends StoragePluginConfig {
 
   public static final String NAME = "phoenix";
-  public static final String THIN_DRIVER_CLASS = "org.apache.phoenix.queryserver.client.Driver";
   public static final String FAT_DRIVER_CLASS = "org.apache.phoenix.jdbc.PhoenixDriver";
 
-  private final String host;
-  private final int port;
-  private final String jdbcURL; // (options) Equal to host + port
+  private final String zkQuorum;
+  private final String zkPath;
+  private final Integer port;
+  private final String jdbcURL; // (optional) Equivalent to zkQuorum + port + zkPath
   private final Map<String, Object> props; // (options) See also http://phoenix.apache.org/tuning.html
 
   @JsonCreator
   public PhoenixStoragePluginConfig(
-      @JsonProperty("host") String host,
-      @JsonProperty("port") int port,
+      @JsonProperty("zkQuorum") String zkQuorum,
+      @JsonProperty("port") Integer port,
+      @JsonProperty("zkPath") String zkPath,
       @JsonProperty("userName") String userName,
       @JsonProperty("password") String password,
       @JsonProperty("jdbcURL") String jdbcURL,
       @JsonProperty("credentialsProvider") CredentialsProvider credentialsProvider,
       @JsonProperty("props") Map<String, Object> props) {
     super(CredentialProviderUtils.getCredentialsProvider(userName, password, credentialsProvider), credentialsProvider == null);
-    this.host = host;
-    this.port = port == 0 ? 8765 : port;
+    this.zkQuorum = zkQuorum;
+    this.zkPath = zkPath;
+    this.port = port;
     this.jdbcURL = jdbcURL;
     this.props = props == null ? Collections.emptyMap() : props;
   }
@@ -69,13 +71,18 @@
       .build();
   }
 
-  @JsonProperty("host")
-  public String getHost() {
-    return host;
+  @JsonProperty("zkQuorum")
+  public String getZkQuorum() {
+    return zkQuorum;
+  }
+
+  @JsonProperty("zkPath")
+  public String getZkPath() {
+    return zkPath;
   }
 
   @JsonProperty("port")
-  public int getPort() {
+  public Integer getPort() {
     return port;
   }
 
@@ -124,7 +131,9 @@
       return Objects.equals(this.jdbcURL, config.getJdbcURL());
     }
     // Then the host and port
-    return Objects.equals(this.host, config.getHost()) && Objects.equals(this.port, config.getPort());
+    return Objects.equals(this.zkQuorum, config.getZkQuorum())
+      && Objects.equals(this.port, config.getPort())
+      && Objects.equals(this.zkPath, config.getZkPath());
   }
 
   @Override
@@ -132,15 +141,15 @@
     if (StringUtils.isNotBlank(jdbcURL)) {
      return Objects.hash(jdbcURL);
     }
-    return Objects.hash(host, port);
+    return Objects.hash(zkQuorum, port, zkPath);
   }
 
   @Override
   public String toString() {
     return new PlanStringBuilder(PhoenixStoragePluginConfig.NAME)
-        .field("driverName", THIN_DRIVER_CLASS)
-        .field("host", host)
+        .field("zkQuorum", zkQuorum)
         .field("port", port)
+        .field("zkPath", zkPath)
         .field("userName", getUsername())
         .maskedField("password", getPassword()) // will set to "*******"
         .field("jdbcURL", jdbcURL)
diff --git a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/rules/PhoenixImplementor.java b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/rules/PhoenixImplementor.java
index 29a45d8..096506b 100644
--- a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/rules/PhoenixImplementor.java
+++ b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/rules/PhoenixImplementor.java
@@ -47,14 +47,12 @@
     if (node instanceof SqlIdentifier) {
       SqlIdentifier identifier = (SqlIdentifier) node;
       String name = identifier.names.get(identifier.names.size() -1);
-      if (!aliasSet.contains(name)) {
-        /*
-         * phoenix does not support the 'SELECT `table_name`.`field_name`',
-         * need to force the alias name and start from `table_name0`,
-         * the result is that 'SELECT `table_name0`.`field_name`'.
-         */
-        aliasSet.add(name);
-      }
+      /*
+       * Phoenix does not support 'SELECT `table_name`.`field_name`',
+       * so force an alias name starting from `table_name0`; the result
+       * is 'SELECT `table_name0`.`field_name`'.
+       */
+      aliasSet.add(name);
     }
     return super.result(node, clauses, rel, aliases);
   }
@@ -67,12 +65,12 @@
   @Override
   public Result visit(Filter e) {
     final RelNode input = e.getInput();
-    Result x = visitRoot(input);
-    parseCorrelTable(e, x);
     if (input instanceof Aggregate) {
       return super.visit(e);
     } else {
-      final Builder builder = x.builder(e, Clause.WHERE);
+      final Result x = visitInput(e, 0, Clause.WHERE);
+      parseCorrelTable(e, x);
+      final Builder builder = x.builder(e);
       builder.setWhere(builder.context.toSql(null, e.getCondition()));
       final List<SqlNode> selectList = new ArrayList<>();
       e.getRowType().getFieldNames().forEach(fieldName -> {
diff --git a/contrib/storage-phoenix/src/main/resources/bootstrap-storage-plugins.json b/contrib/storage-phoenix/src/main/resources/bootstrap-storage-plugins.json
index a1890f0..82254e5 100644
--- a/contrib/storage-phoenix/src/main/resources/bootstrap-storage-plugins.json
+++ b/contrib/storage-phoenix/src/main/resources/bootstrap-storage-plugins.json
@@ -2,10 +2,10 @@
   "storage": {
     "phoenix": {
       "type": "phoenix",
-      "host": "the.queryserver.hostname",
+      "zkQuorum": "zk.quorum.hostnames",
       "port": 8765,
       "props" : {
-        "phoenix.query.timeoutMs": 60000
+        "hbase.client.retries.number": 10
       },
       "enabled": false
     }
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBaseTest.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBaseTest.java
index 9aea261..ecb08c7 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBaseTest.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBaseTest.java
@@ -59,15 +59,15 @@
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
-    PhoenixTestSuite.initPhoenixQueryServer();
+    PhoenixTestSuite.initPhoenix();
     if (PhoenixTestSuite.isRunningSuite()) {
-      QueryServerBasicsIT.testCatalogs();
+      PhoenixBasicsIT.testCatalogs();
     }
     startDrillCluster();
     if (initCount.incrementAndGet() == 1) {
-      createSchema(QueryServerBasicsIT.CONN_STRING);
-      createTables(QueryServerBasicsIT.CONN_STRING);
-      createSampleData(QueryServerBasicsIT.CONN_STRING);
+      createSchema(PhoenixBasicsIT.CONN_STRING);
+      createTables(PhoenixBasicsIT.CONN_STRING);
+      createSampleData(PhoenixBasicsIT.CONN_STRING);
     }
   }
 
@@ -85,8 +85,8 @@
     props.put("phoenix.query.timeoutMs", 90000);
     props.put("phoenix.query.keepAliveMs", "30000");
     StoragePluginRegistry registry = cluster.drillbit().getContext().getStorage();
-    PhoenixStoragePluginConfig config = new PhoenixStoragePluginConfig(null, 0, null, null,
-      QueryServerBasicsIT.CONN_STRING, null, props);
+    PhoenixStoragePluginConfig config = new PhoenixStoragePluginConfig(null, 0, null, null, null,
+      PhoenixBasicsIT.CONN_STRING, null, props);
     config.setEnabled(true);
     registry.put(PhoenixStoragePluginConfig.NAME + "123", config);
     dirTestWatcher.copyResourceToRoot(Paths.get(""));
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBasicsIT.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBasicsIT.java
new file mode 100644
index 0000000..bb61e01
--- /dev/null
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBasicsIT.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.phoenix;
+
+import static org.apache.hadoop.hbase.HConstants.HBASE_DIR;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_CAT;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.util.Optional;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This is a copy of {@code org.apache.phoenix.end2end.QueryServerBasicsIT} until
+ * <a href="https://issues.apache.org/jira/browse/PHOENIX-6613">PHOENIX-6613</a> is fixed
+ */
+public class PhoenixBasicsIT {
+  private static final HBaseTestingUtility util = new HBaseTestingUtility();
+
+  private static final org.slf4j.Logger logger = LoggerFactory.getLogger(PhoenixBasicsIT.class);
+
+  protected static String CONN_STRING;
+  static LocalHBaseCluster hbaseCluster;
+
+  public static synchronized void doSetup() throws Exception {
+    Configuration conf = util.getConfiguration();
+    // Start ZK by hand
+    util.startMiniZKCluster();
+    Path rootdir = util.getDataTestDirOnTestFS(PhoenixBasicsIT.class.getSimpleName());
+    // There is no setRootdir method that is available in all supported HBase versions.
+    conf.set(HBASE_DIR, rootdir.toString());
+    hbaseCluster = new LocalHBaseCluster(conf, 1);
+    hbaseCluster.startup();
+
+    CONN_STRING = PhoenixRuntime.JDBC_PROTOCOL + ":localhost:" + getZookeeperPort();
+    logger.info("JDBC connection string is " + CONN_STRING);
+  }
+
+  public static int getZookeeperPort() {
+    return util.getConfiguration().getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181);
+  }
+
+  public static void testCatalogs() throws Exception {
+    try (final Connection connection = DriverManager.getConnection(CONN_STRING)) {
+      assertFalse(connection.isClosed());
+      try (final ResultSet resultSet = connection.getMetaData().getCatalogs()) {
+        final ResultSetMetaData metaData = resultSet.getMetaData();
+        assertFalse("unexpected populated resultSet", resultSet.next());
+        assertEquals(1, metaData.getColumnCount());
+        assertEquals(TABLE_CAT, metaData.getColumnName(1));
+      }
+    }
+  }
+
+  public static synchronized void afterClass() throws IOException {
+    Optional.ofNullable(hbaseCluster).ifPresent(LocalHBaseCluster::shutdown);
+    util.shutdownMiniCluster();
+  }
+}
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixSQLTest.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixSQLTest.java
index 8e091b9..39cddcc 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixSQLTest.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixSQLTest.java
@@ -220,7 +220,7 @@
     sets.clear();
   }
 
-  @Ignore("use the remote query server directly without minicluster")
+  @Ignore("use the remote phoenix directly without minicluster")
   @Test
   public void testJoinWithFilterPushdown() throws Exception {
     String sql = "select 10 as DRILL, a.n_name, b.r_name from phoenix123.v1.nation a join phoenix123.v1.region b "
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixTestSuite.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixTestSuite.java
index a0f19ff..0e43090 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixTestSuite.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixTestSuite.java
@@ -24,7 +24,6 @@
 import org.apache.drill.test.BaseTest;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Suite;
@@ -38,22 +37,21 @@
    PhoenixSQLTest.class,
    PhoenixCommandTest.class
 })
-@Ignore
 @Category({ SlowTest.class })
 public class PhoenixTestSuite extends BaseTest {
 
   private static final org.slf4j.Logger logger = LoggerFactory.getLogger(PhoenixTestSuite.class);
 
   private static volatile boolean runningSuite = false;
-  private static AtomicInteger initCount = new AtomicInteger(0);
+  private static final AtomicInteger initCount = new AtomicInteger(0);
 
   @BeforeClass
-  public static void initPhoenixQueryServer() throws Exception {
+  public static void initPhoenix() throws Exception {
     TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
     synchronized (PhoenixTestSuite.class) {
       if (initCount.get() == 0) {
         logger.info("Boot the test cluster...");
-        QueryServerBasicsIT.doSetup();
+        PhoenixBasicsIT.doSetup();
       }
       initCount.incrementAndGet();
       runningSuite = true;
@@ -65,7 +63,7 @@
     synchronized (PhoenixTestSuite.class) {
       if (initCount.decrementAndGet() == 0) {
         logger.info("Shutdown all instances of test cluster.");
-        QueryServerBasicsIT.afterClass();
+        PhoenixBasicsIT.afterClass();
       }
     }
   }
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/QueryServerBasicsIT.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/QueryServerBasicsIT.java
deleted file mode 100644
index 4a48bfc..0000000
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/QueryServerBasicsIT.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.phoenix;
-
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_CAT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.end2end.QueryServerThread;
-import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.queryserver.QueryServerProperties;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.ThinClientUtil;
-import org.slf4j.LoggerFactory;
-
-/**
- * This is a copy of {@link org.apache.phoenix.end2end.QueryServerBasicsIT} until
- * <a href="https://issues.apache.org/jira/browse/PHOENIX-6613">PHOENIX-6613</a> is fixed
- */
-public class QueryServerBasicsIT extends BaseTest {
-
-  private static final org.slf4j.Logger logger = LoggerFactory.getLogger(QueryServerBasicsIT.class);
-
-  private static QueryServerThread AVATICA_SERVER;
-  private static Configuration CONF;
-  protected static String CONN_STRING;
-
-  public static synchronized void doSetup() throws Exception {
-      setUpTestDriver(ReadOnlyProps.EMPTY_PROPS);
-
-      CONF = config;
-      if(System.getProperty("do.not.randomize.pqs.port") == null) {
-        CONF.setInt(QueryServerProperties.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
-      }
-      String url = getUrl();
-      AVATICA_SERVER = new QueryServerThread(new String[] { url }, CONF, QueryServerBasicsIT.class.getName());
-      AVATICA_SERVER.start();
-      AVATICA_SERVER.getQueryServer().awaitRunning();
-      final int port = AVATICA_SERVER.getQueryServer().getPort();
-      logger.info("Avatica server started on port " + port);
-      CONN_STRING = ThinClientUtil.getConnectionUrl("localhost", port);
-      logger.info("JDBC connection string is " + CONN_STRING);
-  }
-
-  public static void testCatalogs() throws Exception {
-    try (final Connection connection = DriverManager.getConnection(CONN_STRING)) {
-      assertFalse(connection.isClosed());
-      try (final ResultSet resultSet = connection.getMetaData().getCatalogs()) {
-        final ResultSetMetaData metaData = resultSet.getMetaData();
-        assertFalse("unexpected populated resultSet", resultSet.next());
-        assertEquals(1, metaData.getColumnCount());
-        assertEquals(TABLE_CAT, metaData.getColumnName(1));
-      }
-    }
-  }
-
-  public static synchronized void afterClass() throws Exception {
-    if (AVATICA_SERVER != null) {
-      AVATICA_SERVER.join(TimeUnit.SECONDS.toSeconds(3));
-      Throwable t = AVATICA_SERVER.getQueryServer().getThrowable();
-      if (t != null) {
-        fail("query server threw. " + t.getMessage());
-      }
-      assertEquals("query server didn't exit cleanly", 0, AVATICA_SERVER.getQueryServer().getRetCode());
-    }
-  }
-}
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/HttpParamImpersonationQueryServerIT.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/ImpersonationPhoenixIT.java
similarity index 76%
rename from contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/HttpParamImpersonationQueryServerIT.java
rename to contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/ImpersonationPhoenixIT.java
index f0f0e11..9a4fb25 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/HttpParamImpersonationQueryServerIT.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/ImpersonationPhoenixIT.java
@@ -25,26 +25,23 @@
 import org.apache.hadoop.hbase.security.access.Permission.Action;
 import org.apache.hadoop.hbase.security.token.TokenProvider;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.end2end.TlsUtil;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
-import org.apache.phoenix.queryserver.QueryServerOptions;
-import org.apache.phoenix.queryserver.QueryServerProperties;
-import org.apache.phoenix.queryserver.client.Driver;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.experimental.categories.Category;
 
 import java.util.Arrays;
 import java.util.List;
 
-import static org.apache.drill.exec.store.phoenix.secured.QueryServerEnvironment.LOGIN_USER;
+import static org.apache.drill.exec.store.phoenix.secured.PhoenixEnvironment.LOGIN_USER;
 
 /**
- * This is a copy of {@link org.apache.phoenix.end2end.HttpParamImpersonationQueryServerIT},
+ * This is a copy of {@code org.apache.phoenix.end2end.HttpParamImpersonationQueryServerIT},
  * but customized with 3 users, see {@link SecuredPhoenixBaseTest#runForThreeClients} for details
  */
 @Category(NeedsOwnMiniClusterTest.class)
-public class HttpParamImpersonationQueryServerIT {
+public class ImpersonationPhoenixIT {
 
-  public static QueryServerEnvironment environment;
+  public static PhoenixEnvironment environment;
 
   private static final List<TableName> SYSTEM_TABLE_NAMES = Arrays.asList(
     PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME,
@@ -54,7 +51,7 @@
     PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_HBASE_TABLE_NAME,
     PhoenixDatabaseMetaData.SYSTEM_STATS_HBASE_TABLE_NAME);
 
-  public static synchronized void startQueryServerEnvironment() throws Exception {
+  public static synchronized void startPhoenixEnvironment() throws Exception {
     // Clean up previous environment if any (Junit 4.13 @BeforeParam / @AfterParam would be an alternative)
     if(environment != null) {
       stopEnvironment();
@@ -69,8 +66,7 @@
     // so that the user who is running the Drillbits/MiniDfs can impersonate user1 and user2 (not user3)
     conf.set(String.format("hadoop.proxyuser.%s.hosts", LOGIN_USER), "*");
     conf.set(String.format("hadoop.proxyuser.%s.users", LOGIN_USER), "user1,user2");
-    conf.setBoolean(QueryServerProperties.QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB, true);
-    environment = new QueryServerEnvironment(conf, 3, false);
+    environment = new PhoenixEnvironment(conf, 3, false);
   }
 
   public static synchronized void stopEnvironment() throws Exception {
@@ -79,14 +75,7 @@
   }
 
   static public String getUrlTemplate() {
-    String url = Driver.CONNECT_STRING_PREFIX + "url=%s://localhost:" + environment.getPqsPort() + "?"
-      + QueryServerOptions.DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM + "=%s;authentication=SPNEGO;serialization=PROTOBUF%s";
-    if (environment.getTls()) {
-      return String.format(url, "https", "%s", ";truststore=" + TlsUtil.getTrustStoreFile().getAbsolutePath()
-        + ";truststore_password=" + TlsUtil.getTrustStorePassword());
-    } else {
-      return String.format(url, "http", "%s", "");
-    }
+    return PhoenixRuntime.JDBC_PROTOCOL + ":localhost:%s";
   }
 
   static void grantUsersToPhoenixSystemTables(List<String> usersToGrant) throws Exception {
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/PhoenixEnvironment.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/PhoenixEnvironment.java
new file mode 100644
index 0000000..88b3d60
--- /dev/null
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/PhoenixEnvironment.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.phoenix.secured;
+
+import static org.apache.hadoop.hbase.HConstants.HBASE_DIR;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.net.InetAddress;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.phoenix.query.ConfigurationFactory;
+import org.apache.phoenix.util.InstanceResolver;
+import org.apache.phoenix.util.PhoenixRuntime;
+
+/**
+ * This is a copy of a class from `org.apache.phoenix:phoenix-queryserver-it`;
+ * see the original javadoc in {@code org.apache.phoenix.end2end.QueryServerEnvironment}.
+ * <p>
+ * It is possible to use the original QueryServerEnvironment, but several issues would need to be solved first:
+ * <ul>
+ *   <li>TlsUtil.getClasspathDir(QueryServerEnvironment.class) in QueryServerEnvironment fails because the path
+ *   comes from a jar. This can be fixed by copying TlsUtil into the Drill project and changing the getClasspathDir
+ *   method to use SecuredPhoenixTestSuite.class instead of QueryServerEnvironment.class</li>
+ *   <li>SERVICE_PRINCIPAL from QueryServerEnvironment is for `securecluster`, not the system user. The test then
+ *   fails later, while the Drill cluster is starting and creating the udf area in RemoteFunctionRegistry#createArea:
+ *   the precondition ImpersonationUtil.getProcessUserName().equals(fileStatus.getOwner()) does not hold, because
+ *   ImpersonationUtil.getProcessUserName() is 'securecluster' while fileStatus.getOwner() is the local machine
+ *   login user</li>
+ * </ul>
+ */
+public class PhoenixEnvironment {
+
+  private final File tempDir = new File(getTempDir());
+  private final File keytabDir = new File(tempDir, "keytabs");
+  private final List<File> userKeytabFiles = new ArrayList<>();
+
+  private static final String LOCAL_HOST_REVERSE_DNS_LOOKUP_NAME;
+  static final String LOGIN_USER;
+
+  static {
+    try {
+      // uncomment it for debugging purposes
+      // System.setProperty("sun.security.krb5.debug", "true");
+      LOCAL_HOST_REVERSE_DNS_LOOKUP_NAME = InetAddress.getByName("127.0.0.1").getCanonicalHostName();
+      String userName = System.getProperty("user.name");
+      LOGIN_USER = userName != null ? userName : "securecluster";
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private static final String SPNEGO_PRINCIPAL = "HTTP/" + LOCAL_HOST_REVERSE_DNS_LOOKUP_NAME;
+  private static final String PQS_PRINCIPAL = "phoenixqs/" + LOCAL_HOST_REVERSE_DNS_LOOKUP_NAME;
+  private static final String SERVICE_PRINCIPAL = LOGIN_USER + "/" + LOCAL_HOST_REVERSE_DNS_LOOKUP_NAME;
+  private final File keytab;
+
+  private final MiniKdc kdc;
+  private final HBaseTestingUtility util = new HBaseTestingUtility(conf());
+  private final LocalHBaseCluster hbaseCluster;
+  private int numCreatedUsers;
+
+  private final String phoenixUrl;
+
+  private static Configuration conf() {
+    Configuration configuration = HBaseConfiguration.create();
+    configuration.set(User.HBASE_SECURITY_CONF_KEY, "kerberos");
+    return configuration;
+  }
+
+  private static String getTempDir() {
+    StringBuilder sb = new StringBuilder(32);
+    sb.append(System.getProperty("user.dir")).append(File.separator);
+    sb.append("target").append(File.separator);
+    sb.append(PhoenixEnvironment.class.getSimpleName());
+    sb.append("-").append(UUID.randomUUID());
+    return sb.toString();
+  }
+
+  public String getPhoenixUrl() {
+    return phoenixUrl;
+  }
+
+  public HBaseTestingUtility getUtil() {
+    return util;
+  }
+
+  public File getServiceKeytab() {
+    return keytab;
+  }
+
+  private static void updateDefaultRealm() throws Exception {
+    // (at least) one other phoenix test triggers the caching of this field before the KDC is up
+    // which causes principal parsing to fail.
+    Field f = KerberosName.class.getDeclaredField("defaultRealm");
+    f.setAccessible(true);
+    // Default realm for MiniKDC
+    f.set(null, "EXAMPLE.COM");
+  }
+
+  private void createUsers(int numUsers) throws Exception {
+    assertNotNull("KDC is null, was setup method called?", kdc);
+    numCreatedUsers = numUsers;
+    for (int i = 1; i <= numUsers; i++) {
+      String principal = "user" + i;
+      File keytabFile = new File(keytabDir, principal + ".keytab");
+      kdc.createPrincipal(keytabFile, principal);
+      userKeytabFiles.add(keytabFile);
+    }
+  }
+
+  public Map.Entry<String, File> getUser(int offset) {
+    if (!(offset > 0 && offset <= numCreatedUsers)) {
+      throw new IllegalArgumentException();
+    }
+    return new AbstractMap.SimpleImmutableEntry<>("user" + offset, userKeytabFiles.get(offset - 1));
+  }
+
+  /**
+   * Sets up the security configuration for HDFS.
+   */
+  private void setHdfsSecuredConfiguration(Configuration conf) throws Exception {
+    // Set principal+keytab configuration for HDFS
+    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
+      SERVICE_PRINCIPAL + "@" + kdc.getRealm());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytab.getAbsolutePath());
+    conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY,
+      SERVICE_PRINCIPAL + "@" + kdc.getRealm());
+    conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytab.getAbsolutePath());
+    conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+      SPNEGO_PRINCIPAL + "@" + kdc.getRealm());
+    // Enable token access for HDFS blocks
+    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+    // Only use HTTPS (required because we aren't using "secure" ports)
+    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+    // Bind on localhost for spnego to have a chance at working
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+
+    // Generate SSL certs
+    File keystoresDir = new File(util.getDataTestDir("keystore").toUri().getPath());
+    keystoresDir.mkdirs();
+
+    // Magic flag to tell hdfs to not fail on using ports above 1024
+    conf.setBoolean("ignore.secure.ports.for.testing", true);
+  }
+
+  private static void ensureIsEmptyDirectory(File f) throws IOException {
+    if (f.exists()) {
+      if (f.isDirectory()) {
+        FileUtils.deleteDirectory(f);
+      } else {
+        assertTrue("Failed to delete keytab directory", f.delete());
+      }
+    }
+    assertTrue("Failed to create keytab directory", f.mkdirs());
+  }
+
+  /**
+   * Sets up and starts a Kerberized HBase cluster.
+   */
+  public PhoenixEnvironment(final Configuration confIn, int numberOfUsers, boolean tls)
+    throws Exception {
+
+    Configuration conf = util.getConfiguration();
+    conf.addResource(confIn);
+    // Ensure the dirs we need are created/empty
+    ensureIsEmptyDirectory(tempDir);
+    ensureIsEmptyDirectory(keytabDir);
+    keytab = new File(keytabDir, "test.keytab");
+    // Start a MiniKDC
+    kdc = util.setupMiniKdc(keytab);
+    // Create a service principal and spnego principal in one keytab
+    // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
+    // use separate identities for HBase and HDFS results in a GSS initiate error. The quick
+    // solution is to just use a single "service" principal instead of "hbase" and "hdfs"
+    // (or "dn" and "nn") per usual.
+    kdc.createPrincipal(keytab, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
+    // Start ZK by hand
+    util.startMiniZKCluster();
+
+    // Create a number of unprivileged users
+    createUsers(numberOfUsers);
+
+    // Set configuration for HBase
+    HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + kdc.getRealm());
+    HBaseKerberosUtils.setSecuredConfiguration(conf);
+    setHdfsSecuredConfiguration(conf);
+    UserGroupInformation.setConfiguration(conf);
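+    // UGI must see the secured Configuration before any Kerberos logins happen in this JVM.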
+    conf.setInt(HConstants.MASTER_PORT, 0);
+    conf.setInt(HConstants.MASTER_INFO_PORT, 0);
+    conf.setInt(HConstants.REGIONSERVER_PORT, 0);
+    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
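+    // Port 0 makes each service bind an ephemeral port, so concurrent test JVMs don't collide.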
+
+    // Clear the cached singletons so we can inject our own.
+    InstanceResolver.clearSingletons();
+    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
+    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
+      @Override
+      public Configuration getConfiguration() {
+        return conf;
+      }
+
+      @Override
+      public Configuration getConfiguration(Configuration confToClone) {
+        Configuration copy = new Configuration(conf);
+        copy.addResource(confToClone);
+        return copy;
+      }
+    });
+    updateDefaultRealm();
+
+    // Use LocalHBaseCluster to keep HBaseTestingUtility from doing something wrong
+    // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some test
+    // classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
+    Path rootdir = util.getDataTestDirOnTestFS(PhoenixEnvironment.class.getSimpleName());
+    // There is no setRootdir method that is available in all supported HBase versions.
+    conf.set(HBASE_DIR, rootdir.toString());
+    hbaseCluster = new LocalHBaseCluster(conf, 1);
+    hbaseCluster.startup();
+
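+    // Thick-driver JDBC URL: jdbc:phoenix:<zkQuorum>:<zkClientPort>, e.g. jdbc:phoenix:localhost:2181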
+    phoenixUrl = PhoenixRuntime.JDBC_PROTOCOL + ":localhost:" + getZookeeperPort();
+  }
+
+  public int getZookeeperPort() {
+    return util.getConfiguration().getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181);
+  }
+
+  public void stop() throws Exception {
+    // Remove our custom ConfigurationFactory for future tests
+    InstanceResolver.clearSingletons();
+    if (hbaseCluster != null) {
+      hbaseCluster.shutdown();
+      hbaseCluster.join();
+    }
+    util.shutdownMiniCluster();
+    if (kdc != null) {
+      kdc.stop();
+    }
+    util.closeConnection();
+  }
+}
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/QueryServerEnvironment.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/QueryServerEnvironment.java
deleted file mode 100644
index 6cbe8df..0000000
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/QueryServerEnvironment.java
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.phoenix.secured;
-
-import static org.apache.hadoop.hbase.HConstants.HBASE_DIR;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.net.InetAddress;
-import java.security.PrivilegedAction;
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.LocalHBaseCluster;
-import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.minikdc.MiniKdc;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.phoenix.end2end.TlsUtil;
-import org.apache.phoenix.query.ConfigurationFactory;
-import org.apache.phoenix.queryserver.QueryServerProperties;
-import org.apache.phoenix.queryserver.server.QueryServer;
-import org.apache.phoenix.util.InstanceResolver;
-import org.apache.phoenix.util.ThinClientUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This is a copy of class from `org.apache.phoenix:phoenix-queryserver-it`,
- * see original javadoc in {@link org.apache.phoenix.end2end.QueryServerEnvironment}.
- *
- * It is possible to use original QueryServerEnvironment, but need to solve several issues:
- * <ul>
- *   <li>TlsUtil.getClasspathDir(QueryServerEnvironment.class); in QueryServerEnvironment fails due to the path from jar.
- *   Can be fixed with copying TlsUtil in Drill project and changing getClasspathDir method to use
- *   SecuredPhoenixTestSuite.class instead of QueryServerEnvironment.class</li>
- *   <li>SERVICE_PRINCIPAL from QueryServerEnvironment is for `securecluster` not system user. So Test fails later
- *   in process of starting Drill cluster while creating udf area RemoteFunctionRegistry#createArea, it fails
- *   on checking Precondition ImpersonationUtil.getProcessUserName().equals(fileStatus.getOwner()),
- *   where ImpersonationUtil.getProcessUserName() is 'securecluster' and fileStatus.getOwner() is
- *   your local machine login user</li>
- * </ul>
- */
-public class QueryServerEnvironment {
-  private static final Logger LOG = LoggerFactory.getLogger(QueryServerEnvironment.class);
-
-  private final File TEMP_DIR = new File(getTempDir());
-  private final File KEYTAB_DIR = new File(TEMP_DIR, "keytabs");
-  private final List<File> USER_KEYTAB_FILES = new ArrayList<>();
-
-  private static final String LOCAL_HOST_REVERSE_DNS_LOOKUP_NAME;
-  static final String LOGIN_USER;
-
-  static {
-    try {
-      // uncomment it for debugging purposes
-      // System.setProperty("sun.security.krb5.debug", "true");
-      LOCAL_HOST_REVERSE_DNS_LOOKUP_NAME = InetAddress.getByName("127.0.0.1").getCanonicalHostName();
-      String userName = System.getProperty("user.name");
-      LOGIN_USER = userName != null ? userName : "securecluster";
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private static final String SPNEGO_PRINCIPAL = "HTTP/" + LOCAL_HOST_REVERSE_DNS_LOOKUP_NAME;
-  private static final String PQS_PRINCIPAL = "phoenixqs/" + LOCAL_HOST_REVERSE_DNS_LOOKUP_NAME;
-  private static final String SERVICE_PRINCIPAL = LOGIN_USER + "/" + LOCAL_HOST_REVERSE_DNS_LOOKUP_NAME;
-  private File KEYTAB;
-
-  private MiniKdc KDC;
-  private HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private LocalHBaseCluster HBASE_CLUSTER;
-  private int NUM_CREATED_USERS;
-
-  private ExecutorService PQS_EXECUTOR;
-  private QueryServer PQS;
-  private int PQS_PORT;
-  private String PQS_URL;
-
-  private boolean tls;
-
-  private static String getTempDir() {
-    StringBuilder sb = new StringBuilder(32);
-    sb.append(System.getProperty("user.dir")).append(File.separator);
-    sb.append("target").append(File.separator);
-    sb.append(QueryServerEnvironment.class.getSimpleName());
-    sb.append("-").append(UUID.randomUUID());
-    return sb.toString();
-  }
-
-  public int getPqsPort() {
-    return PQS_PORT;
-  }
-
-  public String getPqsUrl() {
-    return PQS_URL;
-  }
-
-  public boolean getTls() {
-    return tls;
-  }
-
-  public HBaseTestingUtility getUtil() {
-    return UTIL;
-  }
-
-  public String getServicePrincipal() {
-    return SERVICE_PRINCIPAL;
-  }
-
-  public File getServiceKeytab() {
-    return KEYTAB;
-  }
-
-  private static void updateDefaultRealm() throws Exception {
-    // (at least) one other phoenix test triggers the caching of this field before the KDC is up
-    // which causes principal parsing to fail.
-    Field f = KerberosName.class.getDeclaredField("defaultRealm");
-    f.setAccessible(true);
-    // Default realm for MiniKDC
-    f.set(null, "EXAMPLE.COM");
-  }
-
-  private void createUsers(int numUsers) throws Exception {
-    assertNotNull("KDC is null, was setup method called?", KDC);
-    NUM_CREATED_USERS = numUsers;
-    for (int i = 1; i <= numUsers; i++) {
-      String principal = "user" + i;
-      File keytabFile = new File(KEYTAB_DIR, principal + ".keytab");
-      KDC.createPrincipal(keytabFile, principal);
-      USER_KEYTAB_FILES.add(keytabFile);
-    }
-  }
-
-  public Map.Entry<String, File> getUser(int offset) {
-    if (!(offset > 0 && offset <= NUM_CREATED_USERS)) {
-      throw new IllegalArgumentException();
-    }
-    return new AbstractMap.SimpleImmutableEntry<String, File>("user" + offset, USER_KEYTAB_FILES.get(offset - 1));
-  }
-
-  /**
-   * Setup the security configuration for hdfs.
-   */
-  private void setHdfsSecuredConfiguration(Configuration conf) throws Exception {
-    // Set principal+keytab configuration for HDFS
-    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
-      SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-    conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
-    conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY,
-      SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-    conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
-    conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
-      SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
-    // Enable token access for HDFS blocks
-    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
-    // Only use HTTPS (required because we aren't using "secure" ports)
-    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
-    // Bind on localhost for spnego to have a chance at working
-    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
-    conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
-
-    // Generate SSL certs
-    File keystoresDir = new File(UTIL.getDataTestDir("keystore").toUri().getPath());
-    keystoresDir.mkdirs();
-    String sslConfDir = TlsUtil.getClasspathDir(QueryServerEnvironment.class);
-    TlsUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false);
-
-    // Magic flag to tell hdfs to not fail on using ports above 1024
-    conf.setBoolean("ignore.secure.ports.for.testing", true);
-  }
-
-  private static void ensureIsEmptyDirectory(File f) throws IOException {
-    if (f.exists()) {
-      if (f.isDirectory()) {
-        FileUtils.deleteDirectory(f);
-      } else {
-        assertTrue("Failed to delete keytab directory", f.delete());
-      }
-    }
-    assertTrue("Failed to create keytab directory", f.mkdirs());
-  }
-
-  /**
-   * Setup and start kerberosed, hbase
-   * @throws Exception
-   */
-  public QueryServerEnvironment(final Configuration confIn, int numberOfUsers, boolean tls)
-    throws Exception {
-    this.tls = tls;
-
-    Configuration conf = UTIL.getConfiguration();
-    conf.addResource(confIn);
-    // Ensure the dirs we need are created/empty
-    ensureIsEmptyDirectory(TEMP_DIR);
-    ensureIsEmptyDirectory(KEYTAB_DIR);
-    KEYTAB = new File(KEYTAB_DIR, "test.keytab");
-    // Start a MiniKDC
-    KDC = UTIL.setupMiniKdc(KEYTAB);
-    // Create a service principal and spnego principal in one keytab
-    // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
-    // use separate identies for HBase and HDFS results in a GSS initiate error. The quick
-    // solution is to just use a single "service" principal instead of "hbase" and "hdfs"
-    // (or "dn" and "nn") per usual.
-    KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
-    // Start ZK by hand
-    UTIL.startMiniZKCluster();
-
-    // Create a number of unprivileged users
-    createUsers(numberOfUsers);
-
-    // Set configuration for HBase
-    HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-    HBaseKerberosUtils.setSecuredConfiguration(conf);
-    setHdfsSecuredConfiguration(conf);
-    UserGroupInformation.setConfiguration(conf);
-    conf.setInt(HConstants.MASTER_PORT, 0);
-    conf.setInt(HConstants.MASTER_INFO_PORT, 0);
-    conf.setInt(HConstants.REGIONSERVER_PORT, 0);
-    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
-
-    if (tls) {
-      conf.setBoolean(QueryServerProperties.QUERY_SERVER_TLS_ENABLED, true);
-      conf.set(QueryServerProperties.QUERY_SERVER_TLS_KEYSTORE,
-        TlsUtil.getKeyStoreFile().getAbsolutePath());
-      conf.set(QueryServerProperties.QUERY_SERVER_TLS_KEYSTORE_PASSWORD,
-        TlsUtil.getKeyStorePassword());
-      conf.set(QueryServerProperties.QUERY_SERVER_TLS_TRUSTSTORE,
-        TlsUtil.getTrustStoreFile().getAbsolutePath());
-      conf.set(QueryServerProperties.QUERY_SERVER_TLS_TRUSTSTORE_PASSWORD,
-        TlsUtil.getTrustStorePassword());
-    }
-
-    // Secure Phoenix setup
-    conf.set(QueryServerProperties.QUERY_SERVER_KERBEROS_HTTP_PRINCIPAL_ATTRIB_LEGACY,
-      SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
-    conf.set(QueryServerProperties.QUERY_SERVER_HTTP_KEYTAB_FILENAME_ATTRIB,
-      KEYTAB.getAbsolutePath());
-    conf.set(QueryServerProperties.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB,
-      PQS_PRINCIPAL + "@" + KDC.getRealm());
-    conf.set(QueryServerProperties.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB,
-      KEYTAB.getAbsolutePath());
-    conf.setBoolean(QueryServerProperties.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
-    conf.setInt(QueryServerProperties.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
-    // Required so that PQS can impersonate the end-users to HBase
-    conf.set("hadoop.proxyuser.phoenixqs.groups", "*");
-    conf.set("hadoop.proxyuser.phoenixqs.hosts", "*");
-
-    // Clear the cached singletons so we can inject our own.
-    InstanceResolver.clearSingletons();
-    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
-    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
-      @Override
-      public Configuration getConfiguration() {
-        return conf;
-      }
-
-      @Override
-      public Configuration getConfiguration(Configuration confToClone) {
-        Configuration copy = new Configuration(conf);
-        copy.addResource(confToClone);
-        return copy;
-      }
-    });
-    updateDefaultRealm();
-
-    // Start HDFS
-    UTIL.startMiniDFSCluster(1);
-    // Use LocalHBaseCluster to avoid HBaseTestingUtility from doing something wrong
-    // NB. I'm not actually sure what HTU does incorrect, but this was pulled from some test
-    // classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
-    Path rootdir = UTIL.getDataTestDirOnTestFS(QueryServerEnvironment.class.getSimpleName());
-    // There is no setRootdir method that is available in all supported HBase versions.
-    conf.set(HBASE_DIR, rootdir.toString());
-    HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
-    HBASE_CLUSTER.startup();
-
-    // Then fork a thread with PQS in it.
-    configureAndStartQueryServer(tls);
-  }
-
-  private void configureAndStartQueryServer(boolean tls) throws Exception {
-    PQS = new QueryServer(new String[0], UTIL.getConfiguration());
-    // Get the PQS ident for PQS to use
-    final UserGroupInformation ugi =
-      UserGroupInformation.loginUserFromKeytabAndReturnUGI(PQS_PRINCIPAL,
-        KEYTAB.getAbsolutePath());
-    PQS_EXECUTOR = Executors.newSingleThreadExecutor();
-    // Launch PQS, doing in the Kerberos login instead of letting PQS do it itself (which would
-    // break the HBase/HDFS logins also running in the same test case).
-    PQS_EXECUTOR.submit(new Runnable() {
-      @Override
-      public void run() {
-        ugi.doAs(new PrivilegedAction<Void>() {
-          @Override
-          public Void run() {
-            PQS.run();
-            return null;
-          }
-        });
-      }
-    });
-    PQS.awaitRunning();
-    PQS_PORT = PQS.getPort();
-    PQS_URL =
-      ThinClientUtil.getConnectionUrl(tls ? "https" : "http", "localhost", PQS_PORT)
-        + ";authentication=SPNEGO" + (tls
-        ? ";truststore=" + TlsUtil.getTrustStoreFile().getAbsolutePath()
-        + ";truststore_password=" + TlsUtil.getTrustStorePassword()
-        : "");
-    LOG.debug("Phoenix Query Server URL: {}", PQS_URL);
-  }
-
-  public void stop() throws Exception {
-    // Remove our custom ConfigurationFactory for future tests
-    InstanceResolver.clearSingletons();
-    if (PQS_EXECUTOR != null) {
-      PQS.stop();
-      PQS_EXECUTOR.shutdown();
-      if (!PQS_EXECUTOR.awaitTermination(5, TimeUnit.SECONDS)) {
-        LOG.info("PQS didn't exit in 5 seconds, proceeding anyways.");
-      }
-    }
-    if (HBASE_CLUSTER != null) {
-      HBASE_CLUSTER.shutdown();
-      HBASE_CLUSTER.join();
-    }
-    if (UTIL != null) {
-      UTIL.shutdownMiniZKCluster();
-    }
-    if (KDC != null) {
-      KDC.stop();
-    }
-  }
-}
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixBaseTest.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixBaseTest.java
index a490f44..852a336 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixBaseTest.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixBaseTest.java
@@ -36,10 +36,8 @@
 import org.apache.drill.test.LogFixture;
 import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.phoenix.queryserver.server.QueryServer;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeAll;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -47,7 +45,7 @@
 import java.nio.file.Paths;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
-import java.util.HashMap;
+import java.util.Collections;
 import java.util.Map;
 import java.util.Properties;
 import java.util.TimeZone;
@@ -56,11 +54,12 @@
 import static org.apache.drill.exec.store.phoenix.PhoenixBaseTest.createSampleData;
 import static org.apache.drill.exec.store.phoenix.PhoenixBaseTest.createSchema;
 import static org.apache.drill.exec.store.phoenix.PhoenixBaseTest.createTables;
-import static org.apache.drill.exec.store.phoenix.secured.HttpParamImpersonationQueryServerIT.environment;
-import static org.apache.drill.exec.store.phoenix.secured.HttpParamImpersonationQueryServerIT.getUrlTemplate;
-import static org.apache.drill.exec.store.phoenix.secured.HttpParamImpersonationQueryServerIT.grantUsersToGlobalPhoenixUserTables;
-import static org.apache.drill.exec.store.phoenix.secured.HttpParamImpersonationQueryServerIT.grantUsersToPhoenixSystemTables;
-import static org.apache.drill.exec.store.phoenix.secured.SecuredPhoenixTestSuite.initPhoenixQueryServer;
+import static org.apache.drill.exec.store.phoenix.secured.ImpersonationPhoenixIT.environment;
+import static org.apache.drill.exec.store.phoenix.secured.ImpersonationPhoenixIT.getUrlTemplate;
+import static org.apache.drill.exec.store.phoenix.secured.ImpersonationPhoenixIT.grantUsersToGlobalPhoenixUserTables;
+import static org.apache.drill.exec.store.phoenix.secured.ImpersonationPhoenixIT.grantUsersToPhoenixSystemTables;
+import static org.apache.drill.exec.store.phoenix.secured.SecuredPhoenixTestSuite.initPhoenix;
+import static org.junit.Assert.assertThrows;
 
 public abstract class SecuredPhoenixBaseTest extends ClusterTest {
   private static final Logger logger = LoggerFactory.getLogger(PhoenixDataSource.class);
@@ -70,10 +69,10 @@
 
   private final static AtomicInteger initCount = new AtomicInteger(0);
 
-  @BeforeAll
+  @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
-    initPhoenixQueryServer();
+    initPhoenix();
     startSecuredDrillCluster();
     initializeDatabase();
   }
@@ -81,11 +80,10 @@
   private static void startSecuredDrillCluster() throws Exception {
     logFixture = LogFixture.builder()
       .toConsole()
-      .logger(QueryServerEnvironment.class, CURRENT_LOG_LEVEL)
+      .logger(PhoenixEnvironment.class, CURRENT_LOG_LEVEL)
       .logger(SecuredPhoenixBaseTest.class, CURRENT_LOG_LEVEL)
       .logger(KerberosFactory.class, CURRENT_LOG_LEVEL)
       .logger(Krb5LoginModule.class, CURRENT_LOG_LEVEL)
-      .logger(QueryServer.class, CURRENT_LOG_LEVEL)
       .logger(ServerAuthenticationHandler.class, CURRENT_LOG_LEVEL)
       .build();
 
@@ -118,15 +116,10 @@
     user3ClientProperties.setProperty(DrillProperties.KEYTAB, user3.getValue().getAbsolutePath());
     cluster.addClientFixture(user3ClientProperties);
 
-    Map<String, Object> phoenixProps = new HashMap<>();
-    phoenixProps.put("phoenix.query.timeoutMs", 90000);
-    phoenixProps.put("phoenix.query.keepAliveMs", "30000");
-    phoenixProps.put("phoenix.queryserver.withRemoteUserExtractor", true);
     StoragePluginRegistry registry = cluster.drillbit().getContext().getStorage();
-    final String doAsUrl = String.format(getUrlTemplate(), "$user");
-    logger.debug("Phoenix Query Server URL: {}", environment.getPqsUrl());
-    PhoenixStoragePluginConfig config = new PhoenixStoragePluginConfig(null, 0, null, null,
-      doAsUrl, null, phoenixProps);
+    logger.debug("Phoenix URL: {}", environment.getPhoenixUrl());
+    PhoenixStoragePluginConfig config = new PhoenixStoragePluginConfig(null, 0, null, null, null,
+      getUrlTemplate(), null, Collections.emptyMap());
     config.setEnabled(true);
     registry.put(PhoenixStoragePluginConfig.NAME + "123", config);
   }
@@ -143,9 +136,9 @@
       // Build the JDBC URL by hand with the doAs
       final UserGroupInformation serviceUgi = ImpersonationUtil.getProcessUserUGI();
       serviceUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
-        createSchema(environment.getPqsUrl());
-        createTables(environment.getPqsUrl());
-        createSampleData(environment.getPqsUrl());
+        createSchema(environment.getPhoenixUrl());
+        createTables(environment.getPhoenixUrl());
+        createSampleData(environment.getPhoenixUrl());
         grantUsersToPhoenixSystemTables(Arrays.asList(user1.getKey(), user2.getKey()));
         grantUsersToGlobalPhoenixUserTables(Arrays.asList(user1.getKey()));
         return null;
@@ -173,19 +166,19 @@
       wrapper.apply();
       client = cluster.client(1);
       // original is AccessDeniedException: Insufficient permissions for user 'user2'
-      Assertions.assertThrows(user2ExpectedException, wrapper::apply);
+      assertThrows(user2ExpectedException, wrapper::apply);
       client = cluster.client(2);
       // RuntimeException for user3, Failed to execute HTTP Request, got HTTP/401
-      Assertions.assertThrows(user3ExpectedException, wrapper::apply);
+      assertThrows(user3ExpectedException, wrapper::apply);
     } finally {
       client = cluster.client(0);
     }
   }
 
-  @AfterAll
+  @AfterClass
   public static void tearDownCluster() throws Exception {
     if (!SecuredPhoenixTestSuite.isRunningSuite() && environment != null) {
-      HttpParamImpersonationQueryServerIT.stopEnvironment();
+      ImpersonationPhoenixIT.stopEnvironment();
     }
   }
 }
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixCommandTest.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixCommandTest.java
index 1c54ff4..46e1017 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixCommandTest.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixCommandTest.java
@@ -26,12 +26,12 @@
 import org.apache.drill.exec.record.metadata.TupleMetadata;
 import org.apache.drill.test.QueryBuilder;
 import org.apache.drill.test.rowSet.RowSetComparison;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
-@Tag(SlowTest.TAG)
-@Tag(RowSetTest.TAG)
+import static org.junit.Assert.assertEquals;
+
+@Category({ SlowTest.class, RowSetTest.class })
 public class SecuredPhoenixCommandTest extends SecuredPhoenixBaseTest {
 
   @Test
@@ -42,7 +42,7 @@
   private void doTestShowTablesLike() throws Exception {
     runAndPrint("SHOW SCHEMAS");
     run("USE phoenix123.V1");
-    Assertions.assertEquals(1, queryBuilder().sql("SHOW TABLES LIKE '%REGION%'").run().recordCount());
+    assertEquals(1, queryBuilder().sql("SHOW TABLES LIKE '%REGION%'").run().recordCount());
   }
 
   @Test
@@ -77,6 +77,6 @@
 
   private void doTestDescribe() throws Exception {
     run("USE phoenix123.v1");
-    Assertions.assertEquals(4, queryBuilder().sql("DESCRIBE NATION").run().recordCount());
+    assertEquals(4, queryBuilder().sql("DESCRIBE NATION").run().recordCount());
   }
 }
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixDataTypeTest.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixDataTypeTest.java
index 2938f46..67ffa5d 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixDataTypeTest.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixDataTypeTest.java
@@ -26,8 +26,8 @@
 import org.apache.drill.exec.record.metadata.TupleMetadata;
 import org.apache.drill.test.QueryBuilder;
 import org.apache.drill.test.rowSet.RowSetComparison;
-import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import java.math.BigDecimal;
 import java.time.Instant;
@@ -43,8 +43,7 @@
 import static org.apache.drill.test.rowSet.RowSetUtilities.shortArray;
 import static org.apache.drill.test.rowSet.RowSetUtilities.strArray;
 
-@Tag(SlowTest.TAG)
-@Tag(RowSetTest.TAG)
+@Category({ SlowTest.class, RowSetTest.class })
 public class SecuredPhoenixDataTypeTest extends SecuredPhoenixBaseTest {
 
   @Test
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixSQLTest.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixSQLTest.java
index 86ecd3f..55918b8 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixSQLTest.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixSQLTest.java
@@ -28,14 +28,13 @@
 import org.apache.drill.exec.rpc.RpcException;
 import org.apache.drill.test.QueryBuilder;
 import org.apache.drill.test.rowSet.RowSetComparison;
-import org.junit.jupiter.api.Disabled;
-import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.Assert.assertEquals;
 
-@Tag(SlowTest.TAG)
-@Tag(RowSetTest.TAG)
+@Category({ SlowTest.class, RowSetTest.class })
 public class SecuredPhoenixSQLTest extends SecuredPhoenixBaseTest {
 
   @Test
@@ -175,7 +174,7 @@
     String sql = "select count(*) as total from phoenix123.v1.nation";
     String plan = queryBuilder().sql(sql).explainJson();
     long cnt = queryBuilder().physical(plan).singletonLong();
-    assertEquals(25, cnt, "Counts should match");
+    assertEquals("Counts should match", 25, cnt);
   }
 
   @Test
@@ -243,12 +242,12 @@
 
     builder.planMatcher().exclude("Join").match();
 
-    assertEquals(625, sets.rowCount(), "Counts should match");
+    assertEquals("Counts should match", 625, sets.rowCount());
     sets.clear();
   }
 
   @Test
-  @Disabled("use the remote query server directly without minicluster")
+  @Ignore("use the remote phoenix directly without minicluster")
   public void testJoinWithFilterPushdown() throws Exception {
     String sql = "select 10 as DRILL, a.n_name, b.r_name from phoenix123.v1.nation a join phoenix123.v1.region b "
         + "on a.n_regionkey = b.r_regionkey where b.r_name = 'ASIA'";
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixTestSuite.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixTestSuite.java
index 2c4c6fa..89f1a99 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixTestSuite.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixTestSuite.java
@@ -19,29 +19,26 @@
 
 import org.apache.drill.categories.RowSetTest;
 import org.apache.drill.categories.SlowTest;
-import org.apache.drill.exec.store.phoenix.QueryServerBasicsIT;
+import org.apache.drill.exec.store.phoenix.PhoenixBasicsIT;
 import org.apache.drill.test.BaseTest;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Disabled;
-import org.junit.jupiter.api.Tag;
-import org.junit.platform.suite.api.SelectClasses;
-import org.junit.platform.suite.api.Suite;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
 import org.slf4j.LoggerFactory;
 
 import java.util.TimeZone;
 import java.util.concurrent.atomic.AtomicInteger;
 
 
-@Suite
-@SelectClasses({
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
   SecuredPhoenixDataTypeTest.class,
   SecuredPhoenixSQLTest.class,
   SecuredPhoenixCommandTest.class
 })
-@Disabled
-@Tag(SlowTest.TAG)
-@Tag(RowSetTest.TAG)
+@Category({ SlowTest.class, RowSetTest.class })
 public class SecuredPhoenixTestSuite extends BaseTest {
 
   private static final org.slf4j.Logger logger = LoggerFactory.getLogger(SecuredPhoenixTestSuite.class);
@@ -49,25 +46,25 @@
   private static volatile boolean runningSuite = false;
   private static final AtomicInteger initCount = new AtomicInteger(0);
 
-  @BeforeAll
-  public static void initPhoenixQueryServer() throws Exception {
+  @BeforeClass
+  public static void initPhoenix() throws Exception {
     TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
     synchronized (SecuredPhoenixTestSuite.class) {
       if (initCount.get() == 0) {
         logger.info("Boot the test cluster...");
-        HttpParamImpersonationQueryServerIT.startQueryServerEnvironment();
+        ImpersonationPhoenixIT.startPhoenixEnvironment();
       }
       initCount.incrementAndGet();
       runningSuite = true;
     }
   }
 
-  @AfterAll
+  @AfterClass
   public static void tearDownCluster() throws Exception {
     synchronized (SecuredPhoenixTestSuite.class) {
       if (initCount.decrementAndGet() == 0) {
         logger.info("Shutdown all instances of test cluster.");
-        QueryServerBasicsIT.afterClass();
+        PhoenixBasicsIT.afterClass();
       }
     }
   }
diff --git a/contrib/storage-phoenix/src/test/resources/hbase-site.xml b/contrib/storage-phoenix/src/test/resources/hbase-site.xml
index 159c702..3ba8319 100644
--- a/contrib/storage-phoenix/src/test/resources/hbase-site.xml
+++ b/contrib/storage-phoenix/src/test/resources/hbase-site.xml
@@ -28,4 +28,8 @@
     <name>phoenix.schema.isNamespaceMappingEnabled</name>
     <value>true</value>
   </property>
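+  <!-- The default asyncfs WAL provider can fail against the embedded test HDFS,
+       so fall back to the synchronous filesystem WAL for these tests. -->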
+  <property>
+    <name>hbase.wal.provider</name>
+    <value>filesystem</value>
+  </property>
 </configuration>