ORC-126. Remove our fork of storage-api. (omalley)

Fixes #80

Signed-off-by: Owen O'Malley <omalley@apache.org>
diff --git a/java/CMakeLists.txt b/java/CMakeLists.txt
index eaa62b0..4b9d906 100644
--- a/java/CMakeLists.txt
+++ b/java/CMakeLists.txt
@@ -16,7 +16,6 @@
                 WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
 
 set(ORC_JARS
-  ${CMAKE_CURRENT_BINARY_DIR}/storage-api/hive-storage-api-2.1.1.3-pre-orc.jar
   ${CMAKE_CURRENT_BINARY_DIR}/core/orc-core-${ORC_VERSION}.jar
   ${CMAKE_CURRENT_BINARY_DIR}/mapreduce/orc-mapreduce-${ORC_VERSION}.jar
   ${CMAKE_CURRENT_BINARY_DIR}/tools/orc-tools-${ORC_VERSION}-uber.jar
diff --git a/java/pom.xml b/java/pom.xml
index 38b3cde..ff38ae6 100644
--- a/java/pom.xml
+++ b/java/pom.xml
@@ -57,7 +57,6 @@
   </mailingLists>
 
   <modules>
-    <module>storage-api</module>
     <module>core</module>
     <module>mapreduce</module>
     <module>tools</module>
diff --git a/java/storage-api/pom.xml b/java/storage-api/pom.xml
deleted file mode 100644
index 64e0e63..0000000
--- a/java/storage-api/pom.xml
+++ /dev/null
@@ -1,146 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache</groupId>
-    <artifactId>apache</artifactId>
-    <version>18</version>
-    <relativePath></relativePath>
-  </parent>
-
-  <groupId>org.apache.hive</groupId>
-  <artifactId>hive-storage-api</artifactId>
-  <!-- remove our custom version of storage-api once we get the changes
-       released as hive 2.1.1 -->
-  <version>2.1.1.3-pre-orc</version>
-  <packaging>jar</packaging>
-  <name>Hive Storage API</name>
-
-  <dependencies>
-    <!-- test inter-project -->
-    <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <version>2.6</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>2.6.0</version>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>com.google.guava</groupId>
-          <artifactId>guava</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.google.code.findbugs</groupId>
-          <artifactId>jsr305</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>commons-logging</groupId>
-          <artifactId>commons-logging</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>servlet-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-      <version>1.7.5</version>
-    </dependency>
-
-    <!-- test inter-project -->
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-      <version>14.0.1</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <version>4.11</version>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <sourceDirectory>${basedir}/src/java</sourceDirectory>
-    <testSourceDirectory>${basedir}/src/test</testSourceDirectory>
-    <testResources>
-      <testResource>
-        <directory>${basedir}/src/test/resources</directory>
-      </testResource>
-    </testResources>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.1</version>
-        <configuration>
-          <source>1.7</source>
-          <target>1.7</target>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-javadoc-plugin</artifactId>
-        <version>2.10.4</version>
-        <configuration>
-          <reportOutputDirectory>${project.basedir}/../../site/api</reportOutputDirectory>
-          <destDir>${project.artifactId}</destDir>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-project-info-reports-plugin</artifactId>
-        <version>2.9</version>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <version>2.16</version>
-        <configuration>
-          <reuseForks>false</reuseForks>
-          <argLine>-Xmx2048m</argLine>
-          <failIfNoTests>false</failIfNoTests>
-          <systemPropertyVariables>
-            <test.tmp.dir>${test.tmp.dir}</test.tmp.dir>
-          </systemPropertyVariables>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <profiles>
-    <profile>
-      <id>cmake</id>
-      <build>
-        <directory>${build.dir}/storage-api</directory>
-      </build>
-    </profile>
-  </profiles>
-</project>
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/common/DiskRangeInfo.java b/java/storage-api/src/java/org/apache/hadoop/hive/common/DiskRangeInfo.java
deleted file mode 100644
index e5025bf..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/common/DiskRangeInfo.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hive.common.io.DiskRange;
-
-/**
- * Disk range information class containing disk ranges and total length.
- */
-public class DiskRangeInfo {
-  List<DiskRange> diskRanges; // TODO: use DiskRangeList instead
-  long totalLength;
-
-  public DiskRangeInfo(int indexBaseOffset) {
-    this.diskRanges = new ArrayList<>();
-    // Some data is missing from the stream for PPD uncompressed read (because index offset is
-    // relative to the entire stream and we only read part of stream if RGs are filtered; unlike
-    // with compressed data where PPD only filters CBs, so we always get full CB, and index offset
-    // is relative to CB). To take care of the case when UncompressedStream goes seeking around by
-    // its incorrect (relative to partial stream) index offset, we will increase the length by our
-    // offset-relative-to-the-stream, and also account for it in buffers (see createDiskRangeInfo).
-    // So, index offset now works; as long as no one seeks into this data before the RG (why would
-    // they), everything works. This is hacky... Stream shouldn't depend on having all the data.
-    this.totalLength = indexBaseOffset;
-  }
-
-  public void addDiskRange(DiskRange diskRange) {
-    diskRanges.add(diskRange);
-    totalLength += diskRange.getLength();
-  }
-
-  public List<DiskRange> getDiskRanges() {
-    return diskRanges;
-  }
-
-  public long getTotalLength() {
-    return totalLength;
-  }
-}
-
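
For orientation, a minimal sketch of how this removed holder class was meant to be used (a hypothetical demo, not code from this patch): the constructor seeds the total length with the index base offset, so index-relative positions remain valid on uncompressed partial reads.

    import java.util.List;
    import org.apache.hadoop.hive.common.DiskRangeInfo;
    import org.apache.hadoop.hive.common.io.DiskRange;

    public class DiskRangeInfoDemo {
      public static void main(String[] args) {
        // Length accounting starts at the index base offset (here 100).
        DiskRangeInfo info = new DiskRangeInfo(100);
        info.addDiskRange(new DiskRange(100, 250)); // 150 bytes
        info.addDiskRange(new DiskRange(250, 400)); // 150 bytes
        System.out.println(info.getTotalLength());  // 400 = 100 + 150 + 150
        List<DiskRange> ranges = info.getDiskRanges();
        System.out.println(ranges.size());          // 2
      }
    }
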
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/common/Pool.java b/java/storage-api/src/java/org/apache/hadoop/hive/common/Pool.java
deleted file mode 100644
index 272bbdd..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/common/Pool.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common;
-
-/** Simple object pool to prevent GC on small objects passed between threads. */
-public interface Pool<T> {
-  /** Object helper for objects stored in the pool. */
-  public interface PoolObjectHelper<T> {
-    /** Called to create an object when one cannot be provided. */
-    T create();
-    /** Called before the object is put in the pool (regardless of whether put succeeds). */
-    void resetBeforeOffer(T t);
-  }
-
-  T take();
-  void offer(T t);
-  int size();
-}
\ No newline at end of file
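
A minimal sketch of an implementation of the removed interface (SimplePool is hypothetical, not part of Hive), showing where the helper's create() and resetBeforeOffer() hooks fit in the contract:

    import java.util.concurrent.ConcurrentLinkedQueue;
    import org.apache.hadoop.hive.common.Pool;

    // Hypothetical unbounded pool backed by a lock-free queue.
    public class SimplePool<T> implements Pool<T> {
      private final ConcurrentLinkedQueue<T> queue = new ConcurrentLinkedQueue<>();
      private final Pool.PoolObjectHelper<T> helper;

      public SimplePool(Pool.PoolObjectHelper<T> helper) {
        this.helper = helper;
      }

      @Override
      public T take() {
        T result = queue.poll();
        return result != null ? result : helper.create(); // create when empty
      }

      @Override
      public void offer(T t) {
        helper.resetBeforeOffer(t); // reset before the put, as the contract requires
        queue.offer(t);
      }

      @Override
      public int size() {
        return queue.size();
      }
    }
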
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/Allocator.java b/java/storage-api/src/java/org/apache/hadoop/hive/common/io/Allocator.java
deleted file mode 100644
index fd9d9c9..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/Allocator.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common.io;
-
-import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
-
-/** An allocator provided externally to storage classes to allocate MemoryBuffer-s. */
-public interface Allocator {
-  public static class AllocatorOutOfMemoryException extends RuntimeException {
-    public AllocatorOutOfMemoryException(String msg) {
-      super(msg);
-    }
-
-    private static final long serialVersionUID = 268124648177151761L;
-  }
-
-  /**
-   * Allocates multiple buffers of a given size.
-   * @param dest Array where buffers are placed. Objects are reused if already there
-   *             (see createUnallocated), created otherwise.
-   * @param size Allocation size.
-   * @throws AllocatorOutOfMemoryException Cannot allocate.
-   */
-  void allocateMultiple(MemoryBuffer[] dest, int size) throws AllocatorOutOfMemoryException;
-
-  /**
-   * Creates an unallocated memory buffer object. This object can be passed to allocateMultiple
-   * to allocate; this is useful if data structures are created for separate buffers that can
-   * later be allocated together.
-   */
-  MemoryBuffer createUnallocated();
-  /** Deallocates a memory buffer. */
-  void deallocate(MemoryBuffer buffer);
-  /** Whether the allocator uses direct buffers. */
-  boolean isDirectAlloc();
-  /** Maximum allocation size supported by this allocator. */
-  int getMaxAllocation();
-}
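
The intended call order, as a hedged sketch (AllocatorDemo is illustrative; a real allocator instance would come from the caching layer): reserve slot objects with createUnallocated(), then back them all with memory in one allocateMultiple() call.

    import org.apache.hadoop.hive.common.io.Allocator;
    import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;

    public class AllocatorDemo {
      static MemoryBuffer[] allocateBatch(Allocator allocator, int count, int size) {
        MemoryBuffer[] dest = new MemoryBuffer[count];
        for (int i = 0; i < count; ++i) {
          dest[i] = allocator.createUnallocated(); // placeholder slots, no memory yet
        }
        // Backs every slot; throws AllocatorOutOfMemoryException (unchecked) on failure.
        allocator.allocateMultiple(dest, size);
        return dest;
      }

      static void release(Allocator allocator, MemoryBuffer[] buffers) {
        for (MemoryBuffer buffer : buffers) {
          allocator.deallocate(buffer); // every allocated buffer must be returned
        }
      }
    }
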
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/DataCache.java b/java/storage-api/src/java/org/apache/hadoop/hive/common/io/DataCache.java
deleted file mode 100644
index e172059..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/DataCache.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.io;
-
-import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
-
-/** An abstract data cache that IO formats can use to retrieve and cache data. */
-public interface DataCache {
-  public static final class BooleanRef {
-    public boolean value;
-  }
-
-  /** Disk range factory used during cache retrieval. */
-  public interface DiskRangeListFactory {
-    DiskRangeList createCacheChunk(MemoryBuffer buffer, long startOffset, long endOffset);
-  }
-
-  /**
-   * Gets file data for particular offsets. The range list is modified in place; it is then
-   * returned (since the list head could have changed). Ranges are replaced with cached ranges.
-   *
-   * Any such buffer is locked in cache to prevent eviction, and must therefore be released
-   * back to cache via a corresponding call (releaseBuffer) when the caller is done with it.
-   *
-   * In case of partial overlap with cached data, full cache blocks are always returned;
-   * there's no capacity for partial matches in return type. The rules are as follows:
-   * 1) If the requested range starts in the middle of a cached range, that cached range will not
-   *    be returned by default (e.g. if [100,200) and [200,300) are cached, the request for
-   *    [150,300) will only return [200,300) from cache). This may be configurable in impls.
-   *    This is because we assume well-known range start offsets are used (rg/stripe offsets), so
-   *    a request from the middle of the start doesn't make sense.
-   * 2) If the requested range ends in the middle of a cached range, that entire cached range will
-   *    be returned (e.g. if [100,200) and [200,300) are cached, the request for [100,250) will
-   *    return both ranges). It should really be same as #1, however currently ORC uses estimated
-   *    end offsets; if we don't return the end block, the caller may read it from disk needlessly.
-   *
-   * @param fileKey Unique ID of the target file on the file system.
-   * @param range A set of DiskRange-s (linked list) that is to be retrieved. May be modified.
-   * @param baseOffset base offset for the ranges (stripe/stream offset in case of ORC).
-   * @param factory A factory to produce DiskRangeList-s out of cached MemoryBuffer-s.
-   * @param gotAllData An out param - whether all the requested data was found in cache.
-   * @return The new or modified list of DiskRange-s, where some ranges may contain cached data.
-   */
-  DiskRangeList getFileData(Object fileKey, DiskRangeList range, long baseOffset,
-      DiskRangeListFactory factory, BooleanRef gotAllData);
-
-  /**
-   * Puts file data into cache, or gets older data in case of collisions.
-   *
-   * The memory buffers provided MUST be allocated via an allocator returned by getAllocator
-   * method, to allow cache implementations that evict and then de-allocate the buffer.
-   *
-   * It is assumed that the caller will use the data immediately, therefore any buffers provided
-   * to putFileData (or returned due to cache collision) are locked in cache to prevent eviction,
-   * and must therefore be released back to cache via a corresponding call (releaseBuffer) when the
-   * caller is done with it. Buffers rejected due to conflict will neither be locked, nor
-   * automatically deallocated. The caller must take care to discard these buffers.
-   *
-   * @param fileKey Unique ID of the target file on the file system.
-   * @param ranges The ranges for which the data is being cached. These objects will not be stored.
-   * @param data The data for the corresponding ranges.
-   * @param baseOffset base offset for the ranges (stripe/stream offset in case of ORC).
-   * @return null if all data was put; bitmask indicating which chunks were not put otherwise;
-   *         the replacement chunks from cache are updated directly in the array.
-   */
-  long[] putFileData(Object fileKey, DiskRange[] ranges, MemoryBuffer[] data, long baseOffset);
-
-  /**
-   * Releases the buffer returned by getFileData/provided to putFileData back to cache.
-   * See respective javadocs for details.
-   */
-  void releaseBuffer(MemoryBuffer buffer);
-
-  /**
-   * Notifies the cache that the buffer returned from getFileData/provided to putFileData will
-   * be used by another consumer and therefore released multiple times (one more time per call).
-   */
-  void reuseBuffer(MemoryBuffer buffer);
-
-  /**
-   * Gets the allocator associated with this DataCache.
-   */
-  Allocator getAllocator();
-}
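
A hedged sketch of the lock/release discipline described in the javadoc above (readWithCache is hypothetical): buffers returned by getFileData are locked in cache and must be released by the caller.

    import org.apache.hadoop.hive.common.io.DataCache;
    import org.apache.hadoop.hive.common.io.DiskRangeList;

    public class DataCacheDemo {
      static DiskRangeList readWithCache(DataCache cache, Object fileKey,
          DiskRangeList ranges, long baseOffset, DataCache.DiskRangeListFactory factory) {
        DataCache.BooleanRef gotAllData = new DataCache.BooleanRef();
        // Cached parts of 'ranges' are swapped in place for locked cache buffers;
        // the (possibly new) list head is returned.
        DiskRangeList result = cache.getFileData(fileKey, ranges, baseOffset, factory, gotAllData);
        if (!gotAllData.value) {
          // The remaining gaps would be read from disk here and offered back
          // via putFileData, using buffers from cache.getAllocator().
        }
        return result; // caller must releaseBuffer() each cached buffer when done
      }
    }
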
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/DiskRange.java b/java/storage-api/src/java/org/apache/hadoop/hive/common/io/DiskRange.java
deleted file mode 100644
index 33aecf5..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/DiskRange.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common.io;
-
-import java.nio.ByteBuffer;
-
-/**
- * The sections of a file.
- */
-public class DiskRange {
-  /** The first address. */
-  protected long offset;
-  /** The address afterwards. */
-  protected long end;
-
-  public DiskRange(long offset, long end) {
-    this.offset = offset;
-    this.end = end;
-    if (end < offset) {
-      throw new IllegalArgumentException("invalid range " + this);
-    }
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (other == null || other.getClass() != getClass()) {
-      return false;
-    }
-    return equalRange((DiskRange) other);
-  }
-
-  public boolean equalRange(DiskRange other) {
-    return other.offset == offset && other.end == end;
-  }
-
-  @Override
-  public int hashCode() {
-    return (int)(offset ^ (offset >>> 32)) * 31 + (int)(end ^ (end >>> 32));
-  }
-
-  @Override
-  public String toString() {
-    return "range start: " + offset + " end: " + end;
-  }
-
-  public long getOffset() {
-    return offset;
-  }
-
-  public long getEnd() {
-    return end;
-  }
-
-  public int getLength() {
-    long len = this.end - this.offset;
-    assert len <= Integer.MAX_VALUE;
-    return (int)len;
-  }
-
-  // For subclasses
-  public boolean hasData() {
-    return false;
-  }
-
-  public DiskRange sliceAndShift(long offset, long end, long shiftBy) {
-    // Rather, unexpected usage exception.
-    throw new UnsupportedOperationException();
-  }
-
-  public ByteBuffer getData() {
-    throw new UnsupportedOperationException();
-  }
-
-  protected boolean merge(long otherOffset, long otherEnd) {
-    if (!overlap(offset, end, otherOffset, otherEnd)) return false;
-    offset = Math.min(offset, otherOffset);
-    end = Math.max(end, otherEnd);
-    return true;
-  }
-
-  private static boolean overlap(long leftA, long rightA, long leftB, long rightB) {
-    if (leftA <= leftB) {
-      return rightA >= leftB;
-    }
-    return rightB >= leftA;
-  }
-}
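
A small worked example of the range semantics (DiskRangeDemo is illustrative): ranges are half-open [offset, end), and the overlap test treats touching ranges as mergeable.

    import org.apache.hadoop.hive.common.io.DiskRange;

    public class DiskRangeDemo {
      public static void main(String[] args) {
        DiskRange a = new DiskRange(100, 200);                      // [100, 200)
        System.out.println(a.getLength());                          // 100
        System.out.println(a.equalRange(new DiskRange(100, 200)));  // true
        // merge() is protected, but its overlap test accepts touching ranges:
        // [100,200) and [200,300) satisfy rightA >= leftB, so a subclass
        // merging them would combine the pair into [100,300).
      }
    }
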
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/DiskRangeList.java b/java/storage-api/src/java/org/apache/hadoop/hive/common/io/DiskRangeList.java
deleted file mode 100644
index 58d0bb9..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/DiskRangeList.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common.io;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/** Java linked list iterator interface is convoluted, and moreover concurrent modifications
- * of the same list by multiple iterators are impossible. Hence, this.
- * Java also doesn't support multiple inheritance, so this cannot be done as "aspect"... */
-public class DiskRangeList extends DiskRange {
-  private static final Logger LOG = LoggerFactory.getLogger(DiskRangeList.class);
-  public DiskRangeList prev, next;
-
-  public DiskRangeList(long offset, long end) {
-    super(offset, end);
-  }
-
-  /** Replaces this element with another in the list; returns the new element. */
-  public DiskRangeList replaceSelfWith(DiskRangeList other) {
-    other.prev = this.prev;
-    other.next = this.next;
-    if (this.prev != null) {
-      this.prev.next = other;
-    }
-    if (this.next != null) {
-      this.next.prev = other;
-    }
-    this.next = this.prev = null;
-    return other;
-  }
-
-  /**
-   * Inserts an intersecting range before current in the list and adjusts offset accordingly.
-   * @return the new element.
-   */
-  public DiskRangeList insertPartBefore(DiskRangeList other) {
-    assert other.end >= this.offset;
-    this.offset = other.end;
-    other.prev = this.prev;
-    other.next = this;
-    if (this.prev != null) {
-      this.prev.next = other;
-    }
-    this.prev = other;
-    return other;
-  }
-
-  /**
-   * Inserts an element after current in the list.
-   * @return the new element.
-   */
-  public DiskRangeList insertAfter(DiskRangeList other) {
-    other.next = this.next;
-    other.prev = this;
-    if (this.next != null) {
-      this.next.prev = other;
-    }
-    this.next = other;
-    return other;
-  }
-
-  /**
-   * Inserts an intersecting range after current in the list and adjusts offset accordingly.
-   * @return the new element.
-   */
-  public DiskRangeList insertPartAfter(DiskRangeList other) {
-    assert other.offset <= this.end;
-    this.end = other.offset;
-    return insertAfter(other);
-  }
-
-  /** Removes an element after current from the list. */
-  public void removeAfter() {
-    DiskRangeList other = this.next;
-    this.next = other.next;
-    if (this.next != null) {
-      this.next.prev = this;
-    }
-    other.next = other.prev = null;
-  }
-
-  /** Removes the current element from the list. */
-  public void removeSelf() {
-    if (this.prev != null) {
-      this.prev.next = this.next;
-    }
-    if (this.next != null) {
-      this.next.prev = this.prev;
-    }
-    this.next = this.prev = null;
-  }
-
-  /** Splits current element in the list, using DiskRange::slice */
-  public final DiskRangeList split(long cOffset) {
-    insertAfter((DiskRangeList)this.sliceAndShift(cOffset, end, 0));
-    return replaceSelfWith((DiskRangeList)this.sliceAndShift(offset, cOffset, 0));
-  }
-
-  public boolean hasContiguousNext() {
-    return next != null && end == next.offset;
-  }
-
-  // @VisibleForTesting
-  public int listSize() {
-    int result = 1;
-    DiskRangeList current = this.next;
-    while (current != null) {
-      ++result;
-      current = current.next;
-    }
-    return result;
-  }
-
-  public long getTotalLength() {
-    long totalLength = getLength();
-    DiskRangeList current = next;
-    while (current != null) {
-      totalLength += current.getLength();
-      current = current.next;
-    }
-    return totalLength;
-  }
-
-  // @VisibleForTesting
-  public DiskRangeList[] listToArray() {
-    DiskRangeList[] result = new DiskRangeList[listSize()];
-    int i = 0;
-    DiskRangeList current = this.next;
-    while (current != null) {
-      result[i] = current;
-      ++i;
-      current = current.next;
-    }
-    return result;
-  }
-
-  public static class CreateHelper {
-    private DiskRangeList tail = null, head;
-
-    public DiskRangeList getTail() {
-      return tail;
-    }
-
-    public void addOrMerge(long offset, long end, boolean doMerge, boolean doLogNew) {
-      if (doMerge && tail != null && tail.merge(offset, end)) return;
-      if (doLogNew) {
-        LOG.info("Creating new range; last range (which can include some previous adds) was "
-            + tail);
-      }
-      DiskRangeList node = new DiskRangeList(offset, end);
-      if (tail == null) {
-        head = tail = node;
-      } else {
-        tail = tail.insertAfter(node);
-      }
-    }
-
-    public DiskRangeList get() {
-      return head;
-    }
-
-    public DiskRangeList extract() {
-      DiskRangeList result = head;
-      head = null;
-      return result;
-    }
-  }
-
-  /**
-   * List in-place mutation helper - a bogus first element that is inserted before list head,
-   * and thus remains constant even if head is replaced with some new range via in-place list
-   * mutation. extract() can be used to obtain the modified list.
-   */
-  public static class MutateHelper extends DiskRangeList {
-    public MutateHelper(DiskRangeList head) {
-      super(-1, -1);
-      assert head != null;
-      assert head.prev == null;
-      this.next = head;
-      head.prev = this;
-    }
-
-    public DiskRangeList get() {
-      return next;
-    }
-
-    public DiskRangeList extract() {
-      DiskRangeList result = this.next;
-      assert result != null;
-      this.next = result.prev = null;
-      return result;
-    }
-  }
-}
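
A short usage sketch of CreateHelper (illustrative): adjacent adds merge into a single node when doMerge is set, and extract() hands back the list head.

    import org.apache.hadoop.hive.common.io.DiskRangeList;
    import org.apache.hadoop.hive.common.io.DiskRangeList.CreateHelper;

    public class DiskRangeListDemo {
      public static void main(String[] args) {
        CreateHelper helper = new CreateHelper();
        helper.addOrMerge(0, 100, true, false);
        helper.addOrMerge(100, 250, true, false); // touches the tail: merges to [0, 250)
        helper.addOrMerge(500, 600, true, false); // gap: becomes a second node
        DiskRangeList head = helper.extract();
        System.out.println(head.listSize());       // 2
        System.out.println(head.getTotalLength()); // 350
      }
    }
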
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/encoded/EncodedColumnBatch.java b/java/storage-api/src/java/org/apache/hadoop/hive/common/io/encoded/EncodedColumnBatch.java
deleted file mode 100644
index 13772c9..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/encoded/EncodedColumnBatch.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.io.encoded;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * A block of data for a given section of a file, similar to VRB but in encoded form.
- * Stores a set of buffers for each encoded stream that is a part of each column.
- */
-public class EncodedColumnBatch<BatchKey> {
-  /**
-   * Slice of the data for a stream for some column, stored inside MemoryBuffer's.
-   * ColumnStreamData can be reused for many EncodedColumnBatch-es (e.g. dictionary stream), so
-   * it tracks the number of such users via a refcount.
-   */
-  public static class ColumnStreamData {
-    private List<MemoryBuffer> cacheBuffers;
-    /** Base offset from the beginning of the indexable unit; for example, for ORC,
-     * offset from the CB in a compressed file, from the stream in uncompressed file. */
-    private int indexBaseOffset = 0;
-
-    /** Reference count. */
-    private AtomicInteger refCount = new AtomicInteger(0);
-
-    public void reset() {
-      cacheBuffers.clear();
-      refCount.set(0);
-      indexBaseOffset = 0;
-    }
-
-    public void incRef() {
-      refCount.incrementAndGet();
-    }
-
-    public int decRef() {
-      int i = refCount.decrementAndGet();
-      assert i >= 0;
-      return i;
-    }
-
-    public List<MemoryBuffer> getCacheBuffers() {
-      return cacheBuffers;
-    }
-
-    public void setCacheBuffers(List<MemoryBuffer> cacheBuffers) {
-      this.cacheBuffers = cacheBuffers;
-    }
-
-    public int getIndexBaseOffset() {
-      return indexBaseOffset;
-    }
-
-    public void setIndexBaseOffset(int indexBaseOffset) {
-      this.indexBaseOffset = indexBaseOffset;
-    }
-  }
-
-  /** The key that is used to map this batch to source location. */
-  protected BatchKey batchKey;
-  /**
-   * Stream data for each column that has true in the corresponding hasData position.
-   * For each column, streams are indexed by kind (for ORC), with missing elements being null.
-   */
-  protected ColumnStreamData[][] columnData;
-  /** Indicates which columns have data. Correspond to columnData elements. */
-  protected boolean[] hasData;
-
-  public void reset() {
-    if (hasData != null) {
-      Arrays.fill(hasData, false);
-    }
-    if (columnData == null) return;
-    for (int i = 0; i < columnData.length; ++i) {
-      if (columnData[i] == null) continue;
-      for (int j = 0; j < columnData[i].length; ++j) {
-        columnData[i][j] = null;
-      }
-    }
-  }
-
-  public void initColumn(int colIx, int streamCount) {
-    hasData[colIx] = true;
-    if (columnData[colIx] == null || columnData[colIx].length != streamCount) {
-      columnData[colIx] = new ColumnStreamData[streamCount];
-    }
-  }
-
-  public void setStreamData(int colIx, int streamIx, ColumnStreamData csd) {
-    assert hasData[colIx];
-    columnData[colIx][streamIx] = csd;
-  }
-
-  public BatchKey getBatchKey() {
-    return batchKey;
-  }
-
-  public ColumnStreamData[] getColumnData(int colIx) {
-    if (!hasData[colIx]) throw new AssertionError("No data for column " + colIx);
-    return columnData[colIx];
-  }
-
-  public int getTotalColCount() {
-    return columnData.length; // Includes the columns that have no data
-  }
-
-  protected void resetColumnArrays(int columnCount) {
-    if (hasData != null && columnCount == hasData.length) {
-      Arrays.fill(hasData, false);
-      return;
-    }
-    hasData = new boolean[columnCount];
-    ColumnStreamData[][] columnData = new ColumnStreamData[columnCount][];
-    if (this.columnData != null) {
-      for (int i = 0; i < Math.min(columnData.length, this.columnData.length); ++i) {
-        columnData[i] = this.columnData[i];
-      }
-    }
-    this.columnData = columnData;
-  }
-
-  public boolean hasData(int colIx) {
-    return hasData[colIx];
-  }
-}
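
The refcounting contract of ColumnStreamData, as a hedged sketch (StreamDataDemo is illustrative): shared streams such as dictionaries take one reference per consumer and may only be recycled once the count reaches zero.

    import java.util.ArrayList;
    import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData;
    import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;

    public class StreamDataDemo {
      public static void main(String[] args) {
        ColumnStreamData csd = new ColumnStreamData();
        csd.setCacheBuffers(new ArrayList<MemoryBuffer>());
        csd.incRef(); // first consumer (e.g. one RG using a dictionary stream)
        csd.incRef(); // second consumer of the same stream
        csd.decRef(); // returns 1: one reference still outstanding
        if (csd.decRef() == 0) {
          csd.reset(); // last consumer released it; safe to return to a pool
        }
      }
    }
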
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/encoded/MemoryBuffer.java b/java/storage-api/src/java/org/apache/hadoop/hive/common/io/encoded/MemoryBuffer.java
deleted file mode 100644
index 4475009..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/common/io/encoded/MemoryBuffer.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.io.encoded;
-
-import java.nio.ByteBuffer;
-
-/** Abstract interface for any class wrapping a ByteBuffer. */
-public interface MemoryBuffer {
-  /** Note - raw buffer should not be modified. */
-  public ByteBuffer getByteBufferRaw();
-  public ByteBuffer getByteBufferDup();
-}
\ No newline at end of file
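
A minimal on-heap implementation of the removed interface (HeapMemoryBuffer is hypothetical, for illustration only):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;

    public class HeapMemoryBuffer implements MemoryBuffer {
      private final ByteBuffer buffer;

      public HeapMemoryBuffer(int capacity) {
        this.buffer = ByteBuffer.allocate(capacity);
      }

      @Override
      public ByteBuffer getByteBufferRaw() {
        return buffer; // shared instance: callers must not modify it
      }

      @Override
      public ByteBuffer getByteBufferDup() {
        return buffer.duplicate(); // independent position/limit, shared contents
      }
    }
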
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java b/java/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
deleted file mode 100644
index 1c6be91..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
+++ /dev/null
@@ -1,332 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common.type;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.math.RoundingMode;
-
-/**
- *
- * HiveDecimal. Simple wrapper for BigDecimal. Adds a fixed maximum precision and a non-scientific
- * string representation.
- *
- */
-public class HiveDecimal implements Comparable<HiveDecimal> {
-  public static final int MAX_PRECISION = 38;
-  public static final int MAX_SCALE = 38;
-
-  /**
-   * Default precision/scale when user doesn't specify in the column metadata, such as
-   * decimal and decimal(8).
-   */
-  public static final int USER_DEFAULT_PRECISION = 10;
-  public static final int USER_DEFAULT_SCALE = 0;
-
-  /**
-   *  Default precision/scale when system is not able to determine them, such as in case
-   *  of a non-generic udf.
-   */
-  public static final int SYSTEM_DEFAULT_PRECISION = 38;
-  public static final int SYSTEM_DEFAULT_SCALE = 18;
-
-  public static final HiveDecimal ZERO = new HiveDecimal(BigDecimal.ZERO);
-  public static final HiveDecimal ONE = new HiveDecimal(BigDecimal.ONE);
-
-  public static final int ROUND_FLOOR = BigDecimal.ROUND_FLOOR;
-  public static final int ROUND_CEILING = BigDecimal.ROUND_CEILING;
-  public static final int ROUND_HALF_UP = BigDecimal.ROUND_HALF_UP;
-  public static final int ROUND_HALF_EVEN = BigDecimal.ROUND_HALF_EVEN;
-
-  private BigDecimal bd = BigDecimal.ZERO;
-
-  private HiveDecimal(BigDecimal bd) {
-    this.bd = bd;
-  }
-
-  public static HiveDecimal create(BigDecimal b) {
-    return create(b, true);
-  }
-
-  public static HiveDecimal create(BigDecimal b, boolean allowRounding) {
-    BigDecimal bd = normalize(b, allowRounding);
-    return bd == null ? null : new HiveDecimal(bd);
-  }
-
-  public static HiveDecimal create(BigInteger unscaled, int scale) {
-    BigDecimal bd = normalize(new BigDecimal(unscaled, scale), true);
-    return bd == null ? null : new HiveDecimal(bd);
-  }
-
-  public static HiveDecimal create(String dec) {
-    BigDecimal bd;
-    try {
-      bd = new BigDecimal(dec.trim());
-    } catch (NumberFormatException ex) {
-      return null;
-    }
-
-    bd = normalize(bd, true);
-    return bd == null ? null : new HiveDecimal(bd);
-  }
-
-  public static HiveDecimal create(BigInteger bi) {
-    BigDecimal bd = normalize(new BigDecimal(bi), true);
-    return bd == null ? null : new HiveDecimal(bd);
-  }
-
-  public static HiveDecimal create(int i) {
-    return new HiveDecimal(new BigDecimal(i));
-  }
-
-  public static HiveDecimal create(long l) {
-    return new HiveDecimal(new BigDecimal(l));
-  }
-
-  @Override
-  public String toString() {
-     return bd.toPlainString();
-  }
-  
-  /**
-   * Return a string representation of the number with the number of decimal digits as
-   * the given scale. Please note that this is different from toString().
-   * @param scale the number of digits after the decimal point
-   * @return the string representation of exact number of decimal digits
-   */
-  public String toFormatString(int scale) {
-    return (bd.scale() == scale ? bd :
-      bd.setScale(scale, RoundingMode.HALF_UP)).toPlainString();
-  }
-
-  public HiveDecimal setScale(int i) {
-    return new HiveDecimal(bd.setScale(i, RoundingMode.HALF_UP));
-  }
-
-  @Override
-  public int compareTo(HiveDecimal dec) {
-    return bd.compareTo(dec.bd);
-  }
-
-  @Override
-  public int hashCode() {
-    return bd.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null || obj.getClass() != getClass()) {
-      return false;
-    }
-    return bd.equals(((HiveDecimal) obj).bd);
-  }
-
-  public int scale() {
-    return bd.scale();
-  }
-
-  /**
-   * Returns the number of digits (integer and fractional) in the number, which is equivalent
-   * to SQL decimal precision. Note that this is different from BigDecimal.precision(),
-   * which returns the precision of the unscaled value (BigDecimal.valueOf(0.01).precision() = 1,
-   * whereas HiveDecimal.create("0.01").precision() = 2).
-   * If you want the BigDecimal precision, use HiveDecimal.bigDecimalValue().precision()
-   * @return the SQL-style precision: the total count of integer and fractional digits
-   */
-  public int precision() {
-    int bdPrecision = bd.precision();
-    int bdScale = bd.scale();
-
-    if (bdPrecision < bdScale) {
-      // This can happen for numbers less than 0.1
-      // For 0.001234: bdPrecision=4, bdScale=6
-      // In this case, we'll set the type to have the same precision as the scale.
-      return bdScale;
-    }
-    return bdPrecision;
-  }
-
-  public int intValue() {
-    return bd.intValue();
-  }
-
-  public double doubleValue() {
-    return bd.doubleValue();
-  }
-
-  public long longValue() {
-    return bd.longValue();
-  }
-
-  public short shortValue() {
-    return bd.shortValue();
-  }
-
-  public float floatValue() {
-    return bd.floatValue();
-  }
-
-  public BigDecimal bigDecimalValue() {
-    return bd;
-  }
-
-  public byte byteValue() {
-    return bd.byteValue();
-  }
-
-  public HiveDecimal setScale(int adjustedScale, int rm) {
-    return create(bd.setScale(adjustedScale, rm));
-  }
-
-  public HiveDecimal subtract(HiveDecimal dec) {
-    return create(bd.subtract(dec.bd));
-  }
-
-  public HiveDecimal multiply(HiveDecimal dec) {
-    return create(bd.multiply(dec.bd), false);
-  }
-
-  public BigInteger unscaledValue() {
-    return bd.unscaledValue();
-  }
-
-  public HiveDecimal scaleByPowerOfTen(int n) {
-    return create(bd.scaleByPowerOfTen(n));
-  }
-
-  public HiveDecimal abs() {
-    return create(bd.abs());
-  }
-
-  public HiveDecimal negate() {
-    return create(bd.negate());
-  }
-
-  public HiveDecimal add(HiveDecimal dec) {
-    return create(bd.add(dec.bd));
-  }
-
-  public HiveDecimal pow(int n) {
-    BigDecimal result = normalize(bd.pow(n), false);
-    return result == null ? null : new HiveDecimal(result);
-  }
-
-  public HiveDecimal remainder(HiveDecimal dec) {
-    return create(bd.remainder(dec.bd));
-  }
-
-  public HiveDecimal divide(HiveDecimal dec) {
-    return create(bd.divide(dec.bd, MAX_SCALE, RoundingMode.HALF_UP), true);
-  }
-
-  /**
-   * Get the sign of the underlying decimal.
-   * @return 0 if the decimal is equal to 0, -1 if less than zero, and 1 if greater than 0
-   */
-  public int signum() {
-    return bd.signum();
-  }
-
-  private static BigDecimal trim(BigDecimal d) {
-    if (d.compareTo(BigDecimal.ZERO) == 0) {
-      // Special case for 0, because java doesn't strip zeros correctly on that number.
-      d = BigDecimal.ZERO;
-    } else {
-      d = d.stripTrailingZeros();
-      if (d.scale() < 0) {
-        // no negative scale decimals
-        d = d.setScale(0);
-      }
-    }
-    return d;
-  }
-
-  private static BigDecimal normalize(BigDecimal bd, boolean allowRounding) {
-    if (bd == null) {
-      return null;
-    }
-
-    bd = trim(bd);
-
-    int intDigits = bd.precision() - bd.scale();
-
-    if (intDigits > MAX_PRECISION) {
-      return null;
-    }
-
-    int maxScale = Math.min(MAX_SCALE, Math.min(MAX_PRECISION - intDigits, bd.scale()));
-    if (bd.scale() > maxScale ) {
-      if (allowRounding) {
-        bd = bd.setScale(maxScale, RoundingMode.HALF_UP);
-        // Trimming is again necessary, because rounding may introduce new trailing 0's.
-        bd = trim(bd);
-      } else {
-        bd = null;
-      }
-    }
-
-    return bd;
-  }
-
-  private static BigDecimal enforcePrecisionScale(BigDecimal bd, int maxPrecision, int maxScale) {
-    if (bd == null) {
-      return null;
-    }
-
-    /**
-     * Specially handling the case that bd=0, and we are converting it to a type where precision=scale,
-     * such as decimal(1, 1).
-     */
-    if (bd.compareTo(BigDecimal.ZERO) == 0 && bd.scale() == 0 && maxPrecision == maxScale) {
-      return bd.setScale(maxScale);
-    }
-
-    bd = trim(bd);
-
-    if (bd.scale() > maxScale) {
-      bd = bd.setScale(maxScale, RoundingMode.HALF_UP);
-    }
-
-    int maxIntDigits = maxPrecision - maxScale;
-    int intDigits = bd.precision() - bd.scale();
-    if (intDigits > maxIntDigits) {
-      return null;
-    }
-
-    return bd;
-  }
-
-  public static HiveDecimal enforcePrecisionScale(HiveDecimal dec, int maxPrecision, int maxScale) {
-    if (dec == null) {
-      return null;
-    }
-
-    // Minor optimization, avoiding creating new objects.
-    if (dec.precision() - dec.scale() <= maxPrecision - maxScale &&
-        dec.scale() <= maxScale) {
-      return dec;
-    }
-
-    BigDecimal bd = enforcePrecisionScale(dec.bd, maxPrecision, maxScale);
-    if (bd == null) {
-      return null;
-    }
-
-    return HiveDecimal.create(bd);
-  }
-}
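
A worked example of the precision semantics documented above (HiveDecimalDemo is illustrative): precision() counts integer plus fractional digits, unlike BigDecimal.precision(), and out-of-range values yield null rather than an exception.

    import org.apache.hadoop.hive.common.type.HiveDecimal;

    public class HiveDecimalDemo {
      public static void main(String[] args) {
        HiveDecimal d = HiveDecimal.create("0.01");
        System.out.println(d.precision());                   // 2 (SQL-style)
        System.out.println(d.scale());                       // 2
        System.out.println(d.bigDecimalValue().precision()); // 1 (unscaled value)

        // 123.45 has 3 integer digits; decimal(4,2) allows only 2, so: null.
        System.out.println(HiveDecimal.enforcePrecisionScale(
            HiveDecimal.create("123.45"), 4, 2)); // null
        System.out.println(HiveDecimal.enforcePrecisionScale(
            HiveDecimal.create("123.45"), 5, 2)); // 123.45
      }
    }
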
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveIntervalDayTime.java b/java/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveIntervalDayTime.java
deleted file mode 100644
index b891e27..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveIntervalDayTime.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common.type;
-
-import java.math.BigDecimal;
-import java.sql.Timestamp;
-import java.util.Date;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.apache.hive.common.util.IntervalDayTimeUtils;
-
-import sun.util.calendar.BaseCalendar;
-
-/**
- * Day-time interval type representing an offset in days/hours/minutes/seconds,
- * with nanosecond precision.
- * 1 day = 24 hours = 1440 minutes = 86400 seconds
- */
-public class HiveIntervalDayTime implements Comparable<HiveIntervalDayTime> {
-
-  // days/hours/minutes/seconds all represented as seconds
-  protected long totalSeconds;
-  protected int nanos;
-
-  public HiveIntervalDayTime() {
-  }
-
-  public HiveIntervalDayTime(int days, int hours, int minutes, int seconds, int nanos) {
-    set(days, hours, minutes, seconds, nanos);
-  }
-
-  public HiveIntervalDayTime(long seconds, int nanos) {
-    set(seconds, nanos);
-  }
-
-  public HiveIntervalDayTime(BigDecimal seconds) {
-    set(seconds);
-  }
-
-  public HiveIntervalDayTime(HiveIntervalDayTime other) {
-    set(other.totalSeconds, other.nanos);
-  }
-
-  public int getDays() {
-    return (int) TimeUnit.SECONDS.toDays(totalSeconds);
-  }
-
-  public int getHours() {
-    return (int) (TimeUnit.SECONDS.toHours(totalSeconds) % TimeUnit.DAYS.toHours(1));
-  }
-
-  public int getMinutes() {
-    return (int) (TimeUnit.SECONDS.toMinutes(totalSeconds) % TimeUnit.HOURS.toMinutes(1));
-  }
-
-  public int getSeconds() {
-    return (int) (totalSeconds % TimeUnit.MINUTES.toSeconds(1));
-  }
-
-  public int getNanos() {
-    return nanos;
-  }
-
-  /**
-   * Returns days/hours/minutes all converted into seconds.
-   * Nanos still need to be retrieved using getNanos()
-   * @return the total number of seconds, with days/hours/minutes folded in
-   */
-  public long getTotalSeconds() {
-    return totalSeconds;
-  }
-
-  /**
-   *
-   * @return double representation of the interval day time, accurate to nanoseconds
-   */
-  public double getDouble() {
-    return totalSeconds + nanos / 1000000000.0; // floating-point divide preserves sub-second precision
-  }
-
-  /**
-   * Ensures that the seconds and nanoseconds fields have consistent sign
-   */
-  protected void normalizeSecondsAndNanos() {
-    if (totalSeconds > 0 && nanos < 0) {
-      --totalSeconds;
-      nanos += IntervalDayTimeUtils.NANOS_PER_SEC;
-    } else if (totalSeconds < 0 && nanos > 0) {
-      ++totalSeconds;
-      nanos -= IntervalDayTimeUtils.NANOS_PER_SEC;
-    }
-  }
-
-  public void set(int days, int hours, int minutes, int seconds, int nanos) {
-    long totalSeconds = seconds;
-    totalSeconds += TimeUnit.DAYS.toSeconds(days);
-    totalSeconds += TimeUnit.HOURS.toSeconds(hours);
-    totalSeconds += TimeUnit.MINUTES.toSeconds(minutes);
-    totalSeconds += TimeUnit.NANOSECONDS.toSeconds(nanos);
-    nanos = nanos % IntervalDayTimeUtils.NANOS_PER_SEC;
-
-    this.totalSeconds = totalSeconds;
-    this.nanos = nanos;
-
-    normalizeSecondsAndNanos();
-  }
-
-  public void set(long seconds, int nanos) {
-    this.totalSeconds = seconds;
-    this.nanos = nanos;
-    normalizeSecondsAndNanos();
-  }
-
-  public void set(BigDecimal totalSecondsBd) {
-    long totalSeconds = totalSecondsBd.longValue();
-    BigDecimal fractionalSecs = totalSecondsBd.remainder(BigDecimal.ONE);
-    int nanos = fractionalSecs.multiply(IntervalDayTimeUtils.NANOS_PER_SEC_BD).intValue();
-    set(totalSeconds, nanos);
-  }
-
-  public void set(HiveIntervalDayTime other) {
-    set(other.getTotalSeconds(), other.getNanos());
-  }
-
-  public HiveIntervalDayTime negate() {
-    return new HiveIntervalDayTime(-getTotalSeconds(), -getNanos());
-  }
-
-  @Override
-  public int compareTo(HiveIntervalDayTime other) {
-    long cmp = this.totalSeconds - other.totalSeconds;
-    if (cmp == 0) {
-      cmp = this.nanos - other.nanos;
-    }
-    if (cmp != 0) {
-      cmp = cmp > 0 ? 1 : -1;
-    }
-    return (int) cmp;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (!(obj instanceof HiveIntervalDayTime)) {
-      return false;
-    }
-    return 0 == compareTo((HiveIntervalDayTime) obj);
-  }
-
-  /**
-   * Return a copy of this object.
-   */
-  public Object clone() {
-      return new HiveIntervalDayTime(totalSeconds, nanos);
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder().append(totalSeconds).append(nanos).toHashCode();
-  }
-
-  @Override
-  public String toString() {
-    // If normalize() was used, then day-hour-minute-second-nanos should have the same sign.
-    // This is currently working with that assumption.
-    boolean isNegative = (totalSeconds < 0 || nanos < 0);
-    String daySecondSignStr = isNegative ? "-" : "";
-
-    return String.format("%s%d %02d:%02d:%02d.%09d",
-        daySecondSignStr, Math.abs(getDays()),
-        Math.abs(getHours()), Math.abs(getMinutes()),
-        Math.abs(getSeconds()), Math.abs(getNanos()));
-  }
-
-  public static HiveIntervalDayTime valueOf(String strVal) {
-    HiveIntervalDayTime result = null;
-    if (strVal == null) {
-      throw new IllegalArgumentException("Interval day-time string was null");
-    }
-    Matcher patternMatcher = PATTERN_MATCHER.get();
-    patternMatcher.reset(strVal);
-    if (patternMatcher.matches()) {
-      // Parse out the individual parts
-      try {
-        // Sign - whether interval is positive or negative
-        int sign = 1;
-        String field = patternMatcher.group(1);
-        if (field != null && field.equals("-")) {
-          sign = -1;
-        }
-        int days = sign *
-            IntervalDayTimeUtils.parseNumericValueWithRange("day", patternMatcher.group(2),
-                0, Integer.MAX_VALUE);
-        byte hours = (byte) (sign *
-            IntervalDayTimeUtils.parseNumericValueWithRange("hour", patternMatcher.group(3), 0, 23));
-        byte minutes = (byte) (sign *
-            IntervalDayTimeUtils.parseNumericValueWithRange("minute", patternMatcher.group(4), 0, 59));
-        int seconds = 0;
-        int nanos = 0;
-        field = patternMatcher.group(5);
-        if (field != null) {
-          BigDecimal bdSeconds = new BigDecimal(field);
-          if (bdSeconds.compareTo(IntervalDayTimeUtils.MAX_INT_BD) > 0) {
-            throw new IllegalArgumentException("seconds value of " + bdSeconds + " too large");
-          }
-          seconds = sign * bdSeconds.intValue();
-          nanos = sign * bdSeconds.subtract(new BigDecimal(bdSeconds.toBigInteger()))
-              .multiply(IntervalDayTimeUtils.NANOS_PER_SEC_BD).intValue();
-        }
-
-        result = new HiveIntervalDayTime(days, hours, minutes, seconds, nanos);
-      } catch (Exception err) {
-        throw new IllegalArgumentException("Error parsing interval day-time string: " + strVal, err);
-      }
-    } else {
-      throw new IllegalArgumentException(
-          "Interval string does not match day-time format of 'd h:m:s.n': " + strVal);
-    }
-
-    return result;
-  }
-
-  // Simple pattern: D H:M:S.nnnnnnnnn
-  private final static String PARSE_PATTERN =
-      "([+|-])?(\\d+) (\\d+):(\\d+):((\\d+)(\\.(\\d+))?)";
-
-  private static final ThreadLocal<Matcher> PATTERN_MATCHER = new ThreadLocal<Matcher>() {
-      @Override
-      protected Matcher initialValue() {
-        return Pattern.compile(PARSE_PATTERN).matcher("");
-      }
-  };
-}
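
A round-trip example of the 'd h:m:s.n' format (IntervalDemo is illustrative): the single leading sign applies to every field, and seconds/nanos are normalized to a consistent sign.

    import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;

    public class IntervalDemo {
      public static void main(String[] args) {
        HiveIntervalDayTime interval = HiveIntervalDayTime.valueOf("-2 3:04:05.123456789");
        System.out.println(interval.getDays());  // -2
        System.out.println(interval.getHours()); // -3
        System.out.println(interval.getNanos()); // -123456789
        System.out.println(interval);            // -2 03:04:05.123456789
        System.out.println(interval.negate());   // 2 03:04:05.123456789
      }
    }
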
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java b/java/storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java
deleted file mode 100644
index 53a7823..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common.type;
-
-import java.sql.Date;
-import java.sql.Timestamp;
-import java.text.DateFormat;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
-public class RandomTypeUtil {
-
-  public static String getRandString(Random r) {
-    return getRandString(r, null, r.nextInt(10));
-  }
-
-  public static String getRandString(Random r, String characters, int length) {
-    if (characters == null) {
-      characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
-
-    }
-    StringBuilder sb = new StringBuilder();
-    for (int i = 0; i < length; i++) {
-      if (characters == null) {
-        sb.append((char) (r.nextInt(128)));
-      } else {
-        sb.append(characters.charAt(r.nextInt(characters.length())));
-      }
-    }
-    return sb.toString();
-  }
-
-  public static byte[] getRandBinary(Random r, int len){
-    byte[] bytes = new byte[len];
-    for (int j = 0; j < len; j++){
-      bytes[j] = Byte.valueOf((byte) r.nextInt());
-    }
-    return bytes;
-  }
-
-  private static final String DECIMAL_CHARS = "0123456789";
-
-  public static class HiveDecimalAndPrecisionScale {
-    public HiveDecimal hiveDecimal;
-    public int precision;
-    public int scale;
-
-    HiveDecimalAndPrecisionScale(HiveDecimal hiveDecimal, int precision, int scale) {
-      this.hiveDecimal = hiveDecimal;
-      this.precision = precision;
-      this.scale = scale;
-    }
-  }
-
-  public static HiveDecimalAndPrecisionScale getRandHiveDecimal(Random r) {
-    int precision;
-    int scale;
-    while (true) {
-      StringBuilder sb = new StringBuilder();
-      precision = 1 + r.nextInt(18);
-      scale = 0 + r.nextInt(precision + 1);
-
-      int integerDigits = precision - scale;
-
-      if (r.nextBoolean()) {
-        sb.append("-");
-      }
-
-      if (integerDigits == 0) {
-        sb.append("0");
-      } else {
-        sb.append(getRandString(r, DECIMAL_CHARS, integerDigits));
-      }
-      if (scale != 0) {
-        sb.append(".");
-        sb.append(getRandString(r, DECIMAL_CHARS, scale));
-      }
-
-      HiveDecimal bd = HiveDecimal.create(sb.toString());
-      precision = bd.precision();
-      scale = bd.scale();
-      if (scale > precision) {
-        // Values below one can normalize to scale > precision (e.g. 0.001 has
-        // precision 1 but scale 3), which we do not want here; try again.
-        continue;
-      }
-
-      // For now, punt and report the system default precision/scale rather
-      // than the value's actual precision/scale.
-      precision = HiveDecimal.SYSTEM_DEFAULT_PRECISION;
-      scale = HiveDecimal.SYSTEM_DEFAULT_SCALE;
-      return new HiveDecimalAndPrecisionScale(bd, precision, scale);
-    }
-  }
-
-  public static Date getRandDate(Random r) {
-    String dateStr = String.format("%d-%02d-%02d",
-        Integer.valueOf(1800 + r.nextInt(500)),  // year
-        Integer.valueOf(1 + r.nextInt(12)),      // month
-        Integer.valueOf(1 + r.nextInt(28)));     // day
-    Date dateVal = Date.valueOf(dateStr);
-    return dateVal;
-  }
-
-  /**
-   * TIMESTAMP.
-   */
-
-  public static final long NANOSECONDS_PER_SECOND = TimeUnit.SECONDS.toNanos(1);
-  public static final long MILLISECONDS_PER_SECOND = TimeUnit.SECONDS.toMillis(1);
-  public static final long NANOSECONDS_PER_MILLISECOND = TimeUnit.MILLISECONDS.toNanos(1);
-
-  private static ThreadLocal<DateFormat> DATE_FORMAT =
-      new ThreadLocal<DateFormat>() {
-        @Override
-        protected DateFormat initialValue() {
-          return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
-        }
-      };
-
-  // We've switched to Joda/Java Calendar, which has a more limited time range, so constrain random values to four-digit years.
-  public static int MIN_YEAR = 1900;
-  public static int MAX_YEAR = 3000;
-  private static long MIN_FOUR_DIGIT_YEAR_MILLIS = parseToMillis("1900-01-01 00:00:00");
-  private static long MAX_FOUR_DIGIT_YEAR_MILLIS = parseToMillis("3000-01-01 00:00:00");
-
-  private static long parseToMillis(String s) {
-    try {
-      return DATE_FORMAT.get().parse(s).getTime();
-    } catch (ParseException ex) {
-      throw new RuntimeException(ex);
-    }
-  }
-
-  public static Timestamp getRandTimestamp(Random r) {
-    return getRandTimestamp(r, MIN_YEAR, MAX_YEAR);
-  }
-
-  public static Timestamp getRandTimestamp(Random r, int minYear, int maxYear) {
-    String optionalNanos = "";
-    switch (r.nextInt(4)) {
-    case 0:
-      // No nanos.
-      break;
-    case 1:
-      optionalNanos = String.format(".%09d",
-          Integer.valueOf(r.nextInt((int) NANOSECONDS_PER_SECOND)));
-      break;
-    case 2:
-      // Limit to millisecond resolution only...
-      optionalNanos = String.format(".%09d",
-          Integer.valueOf(r.nextInt((int) MILLISECONDS_PER_SECOND)) * NANOSECONDS_PER_MILLISECOND);
-      break;
-    case 3:
-      // Limit to sub-millisecond values only...
-      optionalNanos = String.format(".%09d",
-          Integer.valueOf(r.nextInt((int) NANOSECONDS_PER_MILLISECOND)));
-      break;
-    }
-    String timestampStr = String.format("%04d-%02d-%02d %02d:%02d:%02d%s",
-        Integer.valueOf(minYear + r.nextInt(maxYear - minYear + 1)),  // year
-        Integer.valueOf(1 + r.nextInt(12)),      // month
-        Integer.valueOf(1 + r.nextInt(28)),      // day
-        Integer.valueOf(0 + r.nextInt(24)),      // hour
-        Integer.valueOf(0 + r.nextInt(60)),      // minute
-        Integer.valueOf(0 + r.nextInt(60)),      // second
-        optionalNanos);
-    Timestamp timestampVal;
-    try {
-      timestampVal = Timestamp.valueOf(timestampStr);
-    } catch (Exception e) {
-      System.err.println("Timestamp string " + timestampStr + " did not parse");
-      throw e;
-    }
-    return timestampVal;
-  }
-
-  public static long randomMillis(long minMillis, long maxMillis, Random rand) {
-    return minMillis + (long) ((maxMillis - minMillis) * rand.nextDouble());
-  }
-
-  public static long randomMillis(Random rand) {
-    return randomMillis(MIN_FOUR_DIGIT_YEAR_MILLIS, MAX_FOUR_DIGIT_YEAR_MILLIS, rand);
-  }
-
-  public static int randomNanos(Random rand, int decimalDigits) {
-    // Only keep the most significant decimalDigits digits.
-    int nanos = rand.nextInt((int) NANOSECONDS_PER_SECOND);
-    return nanos - nanos % (int) Math.pow(10, 9 - decimalDigits);
-  }
-
-  public static int randomNanos(Random rand) {
-    return randomNanos(rand, 9);
-  }
-}
\ No newline at end of file
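
RandomTypeUtil above is a test-data helper. As a quick illustration, a minimal sketch of how a test might call it, assuming the hive-storage-api jar is on the classpath (the demo class name and seed are illustrative, not from this repository):

import java.sql.Timestamp;
import java.util.Random;
import org.apache.hadoop.hive.common.type.RandomTypeUtil;

public class RandomTypeUtilDemo {
  public static void main(String[] args) {
    Random r = new Random(42);                    // fixed seed for reproducible test data
    String s = RandomTypeUtil.getRandString(r);   // short random string of upper-case letters
    Timestamp ts = RandomTypeUtil.getRandTimestamp(r); // random year in [1900, 3000]
    System.out.println(s + " / " + ts);
  }
}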
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/BytesColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/BytesColumnVector.java
deleted file mode 100644
index 01c8fa2..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/BytesColumnVector.java
+++ /dev/null
@@ -1,389 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-
-/**
- * This class supports string and binary data by value reference -- i.e. each field is
- * explicitly present, as opposed to provided by a dictionary reference.
- * In some cases, all the values will be in the same byte array to begin with,
- * but this need not be the case. If each value is in a separate byte
- * array to start with, or not all of the values are in the same original
- * byte array, you can still assign data by reference into this column vector.
- * This gives flexibility to use this in multiple situations.
- * <p>
- * When setting data by reference, the caller
- * is responsible for allocating the byte arrays used to hold the data.
- * You can also set data by value, as long as you call the initBuffer() method first.
- * You can mix "by value" and "by reference" in the same column vector,
- * though that use is probably not typical.
- */
-public class BytesColumnVector extends ColumnVector {
-  public byte[][] vector;
-  public int[] start;          // start offset of each field
-
-  /*
-   * The length of each field. If the value repeats for every entry, then it is stored
-   * in vector[0] and isRepeating from the superclass is set to true.
-   */
-  public int[] length;
-  private byte[] buffer;   // optional buffer to use when actually copying in data
-  private int nextFree;    // next free position in buffer
-
-  // Estimate that there will be 16 bytes per entry
-  static final int DEFAULT_BUFFER_SIZE = 16 * VectorizedRowBatch.DEFAULT_SIZE;
-
-  // Proportion of extra space to provide when allocating more buffer space.
-  static final float EXTRA_SPACE_FACTOR = (float) 1.2;
-
-  /**
-   * Use this constructor for normal operation.
-   * All column vectors should be the default size normally.
-   */
-  public BytesColumnVector() {
-    this(VectorizedRowBatch.DEFAULT_SIZE);
-  }
-
-  /**
-   * Don't call this constructor except for testing purposes.
-   *
-   * @param size  number of elements in the column vector
-   */
-  public BytesColumnVector(int size) {
-    super(size);
-    vector = new byte[size][];
-    start = new int[size];
-    length = new int[size];
-  }
-
-  /**
-   * Additional reset work for BytesColumnVector (releasing scratch bytes for by value strings).
-   */
-  @Override
-  public void reset() {
-    super.reset();
-    initBuffer(0);
-  }
-
-  /** Set a field by reference.
-   *
-   * @param elementNum index within column vector to set
-   * @param sourceBuf container of source data
-   * @param start start byte position within source
-   * @param length  length of source byte sequence
-   */
-  public void setRef(int elementNum, byte[] sourceBuf, int start, int length) {
-    vector[elementNum] = sourceBuf;
-    this.start[elementNum] = start;
-    this.length[elementNum] = length;
-  }
-
-  /**
-   * You must call initBuffer() before using setVal().
-   * Provide the estimated number of bytes needed to hold
-   * a full column vector's worth of byte string data.
-   *
-   * @param estimatedValueSize  Estimated size of buffer space needed
-   */
-  public void initBuffer(int estimatedValueSize) {
-    nextFree = 0;
-
-    // if buffer is already allocated, keep using it, don't re-allocate
-    if (buffer != null) {
-      return;
-    }
-
-    // allocate a little extra space to limit need to re-allocate
-    int bufferSize = this.vector.length * (int)(estimatedValueSize * EXTRA_SPACE_FACTOR);
-    if (bufferSize < DEFAULT_BUFFER_SIZE) {
-      bufferSize = DEFAULT_BUFFER_SIZE;
-    }
-    buffer = new byte[bufferSize];
-  }
-
-  /**
-   * Initialize buffer to default size.
-   */
-  public void initBuffer() {
-    initBuffer(0);
-  }
-
-  /**
-   * @return amount of buffer space currently allocated
-   */
-  public int bufferSize() {
-    if (buffer == null) {
-      return 0;
-    }
-    return buffer.length;
-  }
-
-  /**
-   * Set a field by copying data into the local buffer.
-   * Use this method only when it is not practical to set data by reference
-   * with setRef(); setting data by reference tends to run a lot faster
-   * than copying data in.
-   *
-   * @param elementNum index within column vector to set
-   * @param sourceBuf container of source data
-   * @param start start byte position within source
-   * @param length  length of source byte sequence
-   */
-  public void setVal(int elementNum, byte[] sourceBuf, int start, int length) {
-    if ((nextFree + length) > buffer.length) {
-      increaseBufferSpace(length);
-    }
-    System.arraycopy(sourceBuf, start, buffer, nextFree, length);
-    vector[elementNum] = buffer;
-    this.start[elementNum] = nextFree;
-    this.length[elementNum] = length;
-    nextFree += length;
-  }
-
-  /**
-   * Set a field by copying data into the local buffer.
-   * Use this method only when it is not practical to set data by reference
-   * with setRef(); setting data by reference tends to run a lot faster
-   * than copying data in.
-   *
-   * @param elementNum index within column vector to set
-   * @param sourceBuf container of source data
-   */
-  public void setVal(int elementNum, byte[] sourceBuf) {
-    setVal(elementNum, sourceBuf, 0, sourceBuf.length);
-  }
-
-  /**
-   * Set a field to the concatenation of two string values. Result data is copied
-   * into the internal buffer.
-   *
-   * @param elementNum index within column vector to set
-   * @param leftSourceBuf container of left argument
-   * @param leftStart start of left argument
-   * @param leftLen length of left argument
-   * @param rightSourceBuf container of right argument
-   * @param rightStart start of right argument
-   * @param rightLen length of right argument
-   */
-  public void setConcat(int elementNum, byte[] leftSourceBuf, int leftStart, int leftLen,
-      byte[] rightSourceBuf, int rightStart, int rightLen) {
-    int newLen = leftLen + rightLen;
-    if ((nextFree + newLen) > buffer.length) {
-      increaseBufferSpace(newLen);
-    }
-    vector[elementNum] = buffer;
-    this.start[elementNum] = nextFree;
-    this.length[elementNum] = newLen;
-
-    System.arraycopy(leftSourceBuf, leftStart, buffer, nextFree, leftLen);
-    nextFree += leftLen;
-    System.arraycopy(rightSourceBuf, rightStart, buffer, nextFree, rightLen);
-    nextFree += rightLen;
-  }
-
-  /**
-   * Increase buffer space enough to accommodate next element.
-   * This uses an exponential increase mechanism to rapidly
-   * increase buffer size to enough to hold all data.
-   * As batches get re-loaded, buffer space allocated will quickly
-   * stabilize.
-   *
-   * @param nextElemLength size of next element to be added
-   */
-  public void increaseBufferSpace(int nextElemLength) {
-
-    // Keep doubling buffer size until there will be enough space for next element.
-    int newLength = 2 * buffer.length;
-    while((nextFree + nextElemLength) > newLength) {
-      newLength *= 2;
-    }
-
-    // Allocate new buffer, copy data to it, and set buffer to new buffer.
-    byte[] newBuffer = new byte[newLength];
-    System.arraycopy(buffer, 0, newBuffer, 0, nextFree);
-    buffer = newBuffer;
-  }
-
-  /**
-   * Copy the current object contents into the output. Only copy selected entries,
-   * as indicated by selectedInUse and the sel array.
-   */
-  public void copySelected(
-      boolean selectedInUse, int[] sel, int size, BytesColumnVector output) {
-
-    // Output has nulls if and only if input has nulls.
-    output.noNulls = noNulls;
-    output.isRepeating = false;
-
-    // Handle repeating case
-    if (isRepeating) {
-      output.setVal(0, vector[0], start[0], length[0]);
-      output.isNull[0] = isNull[0];
-      output.isRepeating = true;
-      return;
-    }
-
-    // Handle normal case
-
-    // Copy data values over
-    if (selectedInUse) {
-      for (int j = 0; j < size; j++) {
-        int i = sel[j];
-        output.setVal(i, vector[i], start[i], length[i]);
-      }
-    }
-    else {
-      for (int i = 0; i < size; i++) {
-        output.setVal(i, vector[i], start[i], length[i]);
-      }
-    }
-
-    // Copy nulls over if needed
-    if (!noNulls) {
-      if (selectedInUse) {
-        for (int j = 0; j < size; j++) {
-          int i = sel[j];
-          output.isNull[i] = isNull[i];
-        }
-      }
-      else {
-        System.arraycopy(isNull, 0, output.isNull, 0, size);
-      }
-    }
-  }
-
-  /**
-   * Simplify vector by brute-force flattening noNulls and isRepeating.
-   * This can be used to reduce combinatorial explosion of code paths in VectorExpressions
-   * with many arguments, at the expense of some performance.
-   */
-  public void flatten(boolean selectedInUse, int[] sel, int size) {
-    flattenPush();
-    if (isRepeating) {
-      isRepeating = false;
-
-      // setRef is used below and this is safe, because the reference
-      // is to data owned by this column vector. If this column vector
-      // gets re-used, the whole thing is re-used together so there
-      // is no danger of a dangling reference.
-
-      // Only copy data values if entry is not null. The string value
-      // at position 0 is undefined if the position 0 value is null.
-      if (noNulls || !isNull[0]) {
-
-        // loops start at position 1 because position 0 is already set
-        if (selectedInUse) {
-          for (int j = 1; j < size; j++) {
-            int i = sel[j];
-            this.setRef(i, vector[0], start[0], length[0]);
-          }
-        } else {
-          for (int i = 1; i < size; i++) {
-            this.setRef(i, vector[0], start[0], length[0]);
-          }
-        }
-      }
-      flattenRepeatingNulls(selectedInUse, sel, size);
-    }
-    flattenNoNulls(selectedInUse, sel, size);
-  }
-
-  // Fill all the vector entries with the provided value.
-  public void fill(byte[] value) {
-    noNulls = true;
-    isRepeating = true;
-    setRef(0, value, 0, value.length);
-  }
-
-  // Fill the column vector with nulls
-  public void fillWithNulls() {
-    noNulls = false;
-    isRepeating = true;
-    vector[0] = null;
-    isNull[0] = true;
-  }
-
-  @Override
-  public void setElement(int outElementNum, int inputElementNum, ColumnVector inputVector) {
-    if (inputVector.isRepeating) {
-      inputElementNum = 0;
-    }
-    if (inputVector.noNulls || !inputVector.isNull[inputElementNum]) {
-      isNull[outElementNum] = false;
-      BytesColumnVector in = (BytesColumnVector) inputVector;
-      setVal(outElementNum, in.vector[inputElementNum],
-          in.start[inputElementNum], in.length[inputElementNum]);
-    } else {
-      isNull[outElementNum] = true;
-      noNulls = false;
-    }
-  }
-
-  @Override
-  public void init() {
-    initBuffer(0);
-  }
-
-  public String toString(int row) {
-    if (isRepeating) {
-      row = 0;
-    }
-    if (noNulls || !isNull[row]) {
-      return new String(vector[row], start[row], length[row]);
-    } else {
-      return null;
-    }
-  }
-
-  @Override
-  public void stringifyValue(StringBuilder buffer, int row) {
-    if (isRepeating) {
-      row = 0;
-    }
-    if (noNulls || !isNull[row]) {
-      buffer.append('"');
-      buffer.append(new String(vector[row], start[row], length[row]));
-      buffer.append('"');
-    } else {
-      buffer.append("null");
-    }
-  }
-
-  @Override
-  public void ensureSize(int size, boolean preserveData) {
-    super.ensureSize(size, preserveData);
-    if (size > vector.length) {
-      int[] oldStart = start;
-      start = new int[size];
-      int[] oldLength = length;
-      length = new int[size];
-      byte[][] oldVector = vector;
-      vector = new byte[size][];
-      if (preserveData) {
-        if (isRepeating) {
-          vector[0] = oldVector[0];
-          start[0] = oldStart[0];
-          length[0] = oldLength[0];
-        } else {
-          System.arraycopy(oldVector, 0, vector, 0, oldVector.length);
-          System.arraycopy(oldStart, 0, start, 0, oldStart.length);
-          System.arraycopy(oldLength, 0, length, 0, oldLength.length);
-        }
-      }
-    }
-  }
-}
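
The by-reference/by-value contract described in the BytesColumnVector Javadoc above is easy to get wrong; a minimal sketch of both paths (the demo class name and strings are illustrative, not from this repository):

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;

public class BytesColumnVectorDemo {
  public static void main(String[] args) {
    BytesColumnVector col = new BytesColumnVector();
    byte[] shared = "hello".getBytes(StandardCharsets.UTF_8);
    col.setRef(0, shared, 0, shared.length);   // by reference: no copy, caller owns the bytes
    col.initBuffer();                          // required before any setVal()
    col.setVal(1, "world".getBytes(StandardCharsets.UTF_8)); // by value: copied into the shared buffer
    System.out.println(col.toString(0) + " " + col.toString(1));
  }
}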
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/ColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/ColumnVector.java
deleted file mode 100644
index 6f090a1..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/ColumnVector.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import java.util.Arrays;
-
-/**
- * ColumnVector contains the shared structure for the sub-types,
- * including NULL information, and whether this vector
- * repeats, i.e. has all values the same, so only the first
- * one is set. This is used to accelerate query performance
- * by handling a whole vector in O(1) time when applicable.
- *
- * The fields are public by design since this is a performance-critical
- * structure that is used in the inner loop of query execution.
- */
-public abstract class ColumnVector {
-
-  /*
-   * The current kinds of column vectors.
-   */
-  public enum Type {
-    NONE,    // Useful when the type of the column vector has not been determined yet.
-    LONG,
-    DOUBLE,
-    BYTES,
-    DECIMAL,
-    TIMESTAMP,
-    INTERVAL_DAY_TIME,
-    STRUCT,
-    LIST,
-    MAP,
-    UNION
-  }
-
-  /*
-   * If noNulls is false, then this array contains true for each value that
-   * is null, otherwise false. The array is always allocated, so a batch can
-   * be re-used later and nulls added.
-   */
-  public boolean[] isNull;
-
-  // If the whole column vector has no nulls, this is true, otherwise false.
-  public boolean noNulls;
-
-  /*
-   * True if same value repeats for whole column vector.
-   * If so, vector[0] holds the repeating value.
-   */
-  public boolean isRepeating;
-
-  // Variables to hold state from before flattening so it can be easily restored.
-  private boolean preFlattenIsRepeating;
-  private boolean preFlattenNoNulls;
-
-  /**
-   * Constructor for super-class ColumnVector. This is not called directly,
-   * but used to initialize inherited fields.
-   *
-   * @param len Vector length
-   */
-  public ColumnVector(int len) {
-    isNull = new boolean[len];
-    noNulls = true;
-    isRepeating = false;
-    preFlattenNoNulls = true;
-    preFlattenIsRepeating = false;
-  }
-
-  /**
-   * Resets the column to default state
-   *  - fills the isNull array with false
-   *  - sets noNulls to true
-   *  - sets isRepeating to false
-   */
-  public void reset() {
-    if (!noNulls) {
-      Arrays.fill(isNull, false);
-    }
-    noNulls = true;
-    isRepeating = false;
-    preFlattenNoNulls = true;
-    preFlattenIsRepeating = false;
-  }
-
-  /**
-   * Sets the isRepeating flag. Recurses over structs and unions so that the
-   * flags are set correctly.
-   * @param isRepeating the new value for the isRepeating flag
-   */
-  public void setRepeating(boolean isRepeating) {
-    this.isRepeating = isRepeating;
-  }
-
-  public abstract void flatten(boolean selectedInUse, int[] sel, int size);
-
-  // Simplify vector by brute-force flattening noNulls if isRepeating.
-  // This can be used to reduce combinatorial explosion of code paths in VectorExpressions
-  // with many arguments.
-  protected void flattenRepeatingNulls(boolean selectedInUse, int[] sel,
-                                       int size) {
-
-    boolean nullFillValue;
-
-    if (noNulls) {
-      nullFillValue = false;
-    } else {
-      nullFillValue = isNull[0];
-    }
-
-    if (selectedInUse) {
-      for (int j = 0; j < size; j++) {
-        int i = sel[j];
-        isNull[i] = nullFillValue;
-      }
-    } else {
-      Arrays.fill(isNull, 0, size, nullFillValue);
-    }
-
-    // all nulls are now explicit
-    noNulls = false;
-  }
-
-  protected void flattenNoNulls(boolean selectedInUse, int[] sel,
-                                int size) {
-    if (noNulls) {
-      noNulls = false;
-      if (selectedInUse) {
-        for (int j = 0; j < size; j++) {
-          isNull[sel[j]] = false;
-        }
-      } else {
-        Arrays.fill(isNull, 0, size, false);
-      }
-    }
-  }
-
-  /**
-   * Restore the state of isRepeating and noNulls to what it was
-   * before flattening. This must only be called just after flattening
-   * and then evaluating a VectorExpression on the column vector.
-   * It is an optimization that allows other operations on the same
-   * column to continue to benefit from the isRepeating and noNulls
-   * indicators.
-   */
-  public void unFlatten() {
-    isRepeating = preFlattenIsRepeating;
-    noNulls = preFlattenNoNulls;
-  }
-
-  // Record repeating and no nulls state to be restored later.
-  protected void flattenPush() {
-    preFlattenIsRepeating = isRepeating;
-    preFlattenNoNulls = noNulls;
-  }
-
-  /**
-   * Set the element in this column vector from the given input vector.
-   * This method can assume that the output does not have isRepeating set.
-   */
-  public abstract void setElement(int outElementNum, int inputElementNum,
-                                  ColumnVector inputVector);
-
-  /**
-   * Initialize the column vector. This method can be overridden by specific column vector types.
-   * Use this method only if the individual type of the column vector is not known; otherwise it
-   * is preferable to call type-specific initialization methods.
-   */
-  public void init() {
-    // Do nothing by default
-  }
-
-  /**
-   * Ensure the ColumnVector can hold at least size values.
-   * This method is deliberately *not* recursive because the complex types
-   * can easily have more (or fewer) children than the upper levels.
-   * @param size the new minimum size
-   * @param preserveData should the old data be preserved?
-   */
-  public void ensureSize(int size, boolean preserveData) {
-    if (isNull.length < size) {
-      boolean[] oldArray = isNull;
-      isNull = new boolean[size];
-      if (preserveData && !noNulls) {
-        if (isRepeating) {
-          isNull[0] = oldArray[0];
-        } else {
-          System.arraycopy(oldArray, 0, isNull, 0, oldArray.length);
-        }
-      }
-    }
-  }
-
-  /**
-   * Print the value for this column into the given string builder.
-   * @param buffer the buffer to print into
-   * @param row the id of the row to print
-   */
-  public abstract void stringifyValue(StringBuilder buffer,
-                                      int row);
-}
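
The flatten/unFlatten pair above exists so that a VectorExpression can temporarily materialize a repeating value and then restore the O(1) representation. A minimal sketch on a concrete subtype (LongColumnVector, deleted further below; the demo class name is illustrative):

import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

public class FlattenDemo {
  public static void main(String[] args) {
    LongColumnVector col = new LongColumnVector();
    col.fill(7);                       // isRepeating = true; only vector[0] is set
    col.flatten(false, null, 4);       // materialize the value into rows 0..3
    System.out.println(col.vector[3]); // prints 7
    col.unFlatten();                   // restore isRepeating/noNulls for later operations
  }
}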
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
deleted file mode 100644
index 2488631..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-import java.math.BigInteger;
-
-import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
-import org.apache.hadoop.hive.common.type.HiveDecimal;
-
-public class DecimalColumnVector extends ColumnVector {
-
-  /**
-   * A vector of HiveDecimalWritable objects.
-   *
-   * For high performance and easy access to this low-level structure,
-   * the fields are public by design (as they are in other ColumnVector
-   * types).
-   */
-  public HiveDecimalWritable[] vector;
-  public short scale;
-  public short precision;
-
-  public DecimalColumnVector(int precision, int scale) {
-    this(VectorizedRowBatch.DEFAULT_SIZE, precision, scale);
-  }
-
-  public DecimalColumnVector(int size, int precision, int scale) {
-    super(size);
-    this.precision = (short) precision;
-    this.scale = (short) scale;
-    vector = new HiveDecimalWritable[size];
-    for (int i = 0; i < size; i++) {
-      vector[i] = new HiveDecimalWritable(HiveDecimal.ZERO);
-    }
-  }
-
-  // Fill all the vector entries with the provided value.
-  public void fill(HiveDecimal value) {
-    noNulls = true;
-    isRepeating = true;
-    if (vector[0] == null) {
-      vector[0] = new HiveDecimalWritable(value);
-    } else {
-      vector[0].set(value);
-    }
-  }
-
-  @Override
-  public void flatten(boolean selectedInUse, int[] sel, int size) {
-    // TODO Auto-generated method stub
-  }
-
-  @Override
-  public void setElement(int outElementNum, int inputElementNum, ColumnVector inputVector) {
-    if (inputVector.isRepeating) {
-      inputElementNum = 0;
-    }
-    if (inputVector.noNulls || !inputVector.isNull[inputElementNum]) {
-      HiveDecimal hiveDec =
-          ((DecimalColumnVector) inputVector).vector[inputElementNum]
-              .getHiveDecimal(precision, scale);
-      if (hiveDec == null) {
-        isNull[outElementNum] = true;
-        noNulls = false;
-      } else {
-        isNull[outElementNum] = false;
-        vector[outElementNum].set(hiveDec);
-      }
-    } else {
-      isNull[outElementNum] = true;
-      noNulls = false;
-    }
-  }
-
-  @Override
-  public void stringifyValue(StringBuilder buffer, int row) {
-    if (isRepeating) {
-      row = 0;
-    }
-    if (noNulls || !isNull[row]) {
-      buffer.append(vector[row].toString());
-    } else {
-      buffer.append("null");
-    }
-  }
-
-  public void set(int elementNum, HiveDecimalWritable writable) {
-    if (writable == null) {
-      noNulls = false;
-      isNull[elementNum] = true;
-    } else {
-      HiveDecimal hiveDec = writable.getHiveDecimal(precision, scale);
-      if (hiveDec == null) {
-        noNulls = false;
-        isNull[elementNum] = true;
-      } else {
-        vector[elementNum].set(hiveDec);
-      }
-    }
-  }
-
-  public void set(int elementNum, HiveDecimal hiveDec) {
-    HiveDecimal checkedDec = HiveDecimal.enforcePrecisionScale(hiveDec, precision, scale);
-    if (checkedDec == null) {
-      noNulls = false;
-      isNull[elementNum] = true;
-    } else {
-      vector[elementNum].set(checkedDec);
-    }
-  }
-
-  public void setNullDataValue(int elementNum) {
-    // E.g. For scale 2 the minimum is "0.01"
-    HiveDecimal minimumNonZeroValue = HiveDecimal.create(BigInteger.ONE, scale);
-    vector[elementNum].set(minimumNonZeroValue);
-  }
-
-  @Override
-  public void ensureSize(int size, boolean preserveData) {
-    super.ensureSize(size, preserveData);
-    if (size <= vector.length) return; // We assume the existing vector is always valid.
-    HiveDecimalWritable[] oldArray = vector;
-    vector = new HiveDecimalWritable[size];
-    int initPos = 0;
-    if (preserveData) {
-      // we copy all of the values to avoid creating more objects
-      // TODO: it might be cheaper to always preserve data or reset existing objects
-      initPos = oldArray.length;
-      System.arraycopy(oldArray, 0, vector, 0, oldArray.length);
-    }
-    for (int i = initPos; i < vector.length; ++i) {
-      vector[i] = new HiveDecimalWritable(HiveDecimal.ZERO);
-    }
-  }
-}
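
Note that both set() overloads above null out a row rather than throwing when a value cannot be represented at the declared precision/scale. A minimal sketch (the demo class name and values are illustrative):

import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;

public class DecimalColumnVectorDemo {
  public static void main(String[] args) {
    DecimalColumnVector col = new DecimalColumnVector(5, 2); // precision 5, scale 2
    col.set(0, HiveDecimal.create("123.45"));  // fits the declared type: stored as-is
    col.set(1, HiveDecimal.create("123456"));  // too many integer digits: the row becomes null
    System.out.println(col.isNull[1]);         // prints true
  }
}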
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DoubleColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DoubleColumnVector.java
deleted file mode 100644
index bd421f4..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DoubleColumnVector.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import java.util.Arrays;
-
-/**
- * This class represents a nullable double precision floating point column vector.
- * This class will be used for operations on all floating point types (float, double)
- * and as such will use a 64-bit double value to hold the biggest possible value.
- * During copy-in/copy-out, smaller types (i.e. float) will be converted as needed. This will
- * reduce the amount of code that needs to be generated and also will run fast since the
- * machine operates with 64-bit words.
- *
- * The vector[] field is public by design for high-performance access in the inner
- * loop of query execution.
- */
-public class DoubleColumnVector extends ColumnVector {
-  public double[] vector;
-  public static final double NULL_VALUE = Double.NaN;
-
-  /**
-   * Use this constructor by default. All column vectors
-   * should normally be the default size.
-   */
-  public DoubleColumnVector() {
-    this(VectorizedRowBatch.DEFAULT_SIZE);
-  }
-
-  /**
-   * Don't use this except for testing purposes.
-   *
-   * @param len the number of rows
-   */
-  public DoubleColumnVector(int len) {
-    super(len);
-    vector = new double[len];
-  }
-
-  // Copy the current object contents into the output. Only copy selected entries,
-  // as indicated by selectedInUse and the sel array.
-  public void copySelected(
-      boolean selectedInUse, int[] sel, int size, DoubleColumnVector output) {
-
-    // Output has nulls if and only if input has nulls.
-    output.noNulls = noNulls;
-    output.isRepeating = false;
-
-    // Handle repeating case
-    if (isRepeating) {
-      output.vector[0] = vector[0];
-      output.isNull[0] = isNull[0];
-      output.isRepeating = true;
-      return;
-    }
-
-    // Handle normal case
-
-    // Copy data values over
-    if (selectedInUse) {
-      for (int j = 0; j < size; j++) {
-        int i = sel[j];
-        output.vector[i] = vector[i];
-      }
-    }
-    else {
-      System.arraycopy(vector, 0, output.vector, 0, size);
-    }
-
-    // Copy nulls over if needed
-    if (!noNulls) {
-      if (selectedInUse) {
-        for (int j = 0; j < size; j++) {
-          int i = sel[j];
-          output.isNull[i] = isNull[i];
-        }
-      }
-      else {
-        System.arraycopy(isNull, 0, output.isNull, 0, size);
-      }
-    }
-  }
-
-  // Fill the column vector with the provided value
-  public void fill(double value) {
-    noNulls = true;
-    isRepeating = true;
-    vector[0] = value;
-  }
-
-  // Fill the column vector with nulls
-  public void fillWithNulls() {
-    noNulls = false;
-    isRepeating = true;
-    vector[0] = NULL_VALUE;
-    isNull[0] = true;
-  }
-
-  // Simplify vector by brute-force flattening noNulls and isRepeating
-  // This can be used to reduce combinatorial explosion of code paths in VectorExpressions
-  // with many arguments.
-  public void flatten(boolean selectedInUse, int[] sel, int size) {
-    flattenPush();
-    if (isRepeating) {
-      isRepeating = false;
-      double repeatVal = vector[0];
-      if (selectedInUse) {
-        for (int j = 0; j < size; j++) {
-          int i = sel[j];
-          vector[i] = repeatVal;
-        }
-      } else {
-        Arrays.fill(vector, 0, size, repeatVal);
-      }
-      flattenRepeatingNulls(selectedInUse, sel, size);
-    }
-    flattenNoNulls(selectedInUse, sel, size);
-  }
-
-  @Override
-  public void setElement(int outElementNum, int inputElementNum, ColumnVector inputVector) {
-    if (inputVector.isRepeating) {
-      inputElementNum = 0;
-    }
-    if (inputVector.noNulls || !inputVector.isNull[inputElementNum]) {
-      isNull[outElementNum] = false;
-      vector[outElementNum] =
-          ((DoubleColumnVector) inputVector).vector[inputElementNum];
-    } else {
-      isNull[outElementNum] = true;
-      noNulls = false;
-    }
-  }
-
-  @Override
-  public void stringifyValue(StringBuilder buffer, int row) {
-    if (isRepeating) {
-      row = 0;
-    }
-    if (noNulls || !isNull[row]) {
-      buffer.append(vector[row]);
-    } else {
-      buffer.append("null");
-    }
-  }
-
-  @Override
-  public void ensureSize(int size, boolean preserveData) {
-    super.ensureSize(size, preserveData);
-    if (size > vector.length) {
-      double[] oldArray = vector;
-      vector = new double[size];
-      if (preserveData) {
-        if (isRepeating) {
-          vector[0] = oldArray[0];
-        } else {
-          System.arraycopy(oldArray, 0, vector, 0, oldArray.length);
-        }
-      }
-    }
-  }
-}
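
copySelected() above is the pattern used with VectorizedRowBatch's sel array: only the live rows are touched. A minimal sketch (the demo class name and values are illustrative):

import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;

public class CopySelectedDemo {
  public static void main(String[] args) {
    DoubleColumnVector in = new DoubleColumnVector();
    DoubleColumnVector out = new DoubleColumnVector();
    for (int i = 0; i < 4; i++) {
      in.vector[i] = i * 1.5;
    }
    int[] sel = {1, 3};                 // only rows 1 and 3 are selected
    in.copySelected(true, sel, 2, out); // size is the number of selected rows
    System.out.println(out.vector[1] + " " + out.vector[3]); // prints 1.5 4.5
  }
}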
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/IntervalDayTimeColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/IntervalDayTimeColumnVector.java
deleted file mode 100644
index c4a6c0f..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/IntervalDayTimeColumnVector.java
+++ /dev/null
@@ -1,367 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import java.util.Arrays;
-
-import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
-import org.apache.hadoop.io.Writable;
-
-/**
- * This class represents a nullable interval day time column vector capable of handling a
- * wide range of interval day time values.
- *
- * We store the two value fields of a HiveIntervalDayTime (total seconds and
- * nanos) in primitive arrays, rather than keeping an array of Java
- * HiveIntervalDayTime objects, which would have poor storage and memory
- * access characteristics.
- *
- * Generally, the caller will fill in a scratch HiveIntervalDayTime object with values from a row,
- * work using the scratch HiveIntervalDayTime, and then perhaps update the column vector row
- * with a result.
- */
-public class IntervalDayTimeColumnVector extends ColumnVector {
-
-  /*
-   * The storage arrays for this column vector corresponds to the storage of a HiveIntervalDayTime:
-   */
-  private long[] totalSeconds;
-      // The values from HiveIntervalDayTime.getTotalSeconds().
-
-  private int[] nanos;
-      // The values from HiveIntervalDayTime.getNanos().
-
-  /*
-   * Scratch objects.
-   */
-  private final HiveIntervalDayTime scratchIntervalDayTime;
-
-  private Writable scratchWritable;
-      // Supports keeping a HiveIntervalDayTimeWritable object without having to import
-      // that definition...
-
-  /**
-   * Use this constructor by default. All column vectors
-   * should normally be the default size.
-   */
-  public IntervalDayTimeColumnVector() {
-    this(VectorizedRowBatch.DEFAULT_SIZE);
-  }
-
-  /**
-   * Don't use this except for testing purposes.
-   *
-   * @param len the number of rows
-   */
-  public IntervalDayTimeColumnVector(int len) {
-    super(len);
-
-    totalSeconds = new long[len];
-    nanos = new int[len];
-
-    scratchIntervalDayTime = new HiveIntervalDayTime();
-
-    scratchWritable = null;     // Allocated by caller.
-  }
-
-  /**
-   * Return the number of rows.
-   * @return the number of rows
-   */
-  public int getLength() {
-    return totalSeconds.length;
-  }
-
-  /**
-   * Return a row's HiveIntervalDayTime.getTotalSeconds() value.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param elementNum
-   * @return
-   */
-  public long getTotalSeconds(int elementNum) {
-    return totalSeconds[elementNum];
-  }
-
-  /**
-   * Return a row's HiveIntervalDayTime.getNanos() value.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param elementNum
-   * @return
-   */
-  public long getNanos(int elementNum) {
-    return nanos[elementNum];
-  }
-
-  /**
-   * Return a row's HiveIntervalDayTime.getDouble() value.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param elementNum
-   * @return
-   */
-  public double getDouble(int elementNum) {
-    return asScratchIntervalDayTime(elementNum).getDouble();
-  }
-
-  /**
-   * Set a HiveIntervalDayTime object from a row of the column.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param intervalDayTime
-   * @param elementNum
-   */
-  public void intervalDayTimeUpdate(HiveIntervalDayTime intervalDayTime, int elementNum) {
-    intervalDayTime.set(totalSeconds[elementNum], nanos[elementNum]);
-  }
-
-
-  /**
-   * Return the scratch HiveIntervalDayTime object set from a row.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param elementNum
-   * @return
-   */
-  public HiveIntervalDayTime asScratchIntervalDayTime(int elementNum) {
-    scratchIntervalDayTime.set(totalSeconds[elementNum], nanos[elementNum]);
-    return scratchIntervalDayTime;
-  }
-
-  /**
-   * Return the scratch HiveIntervalDayTime (contents undefined).
-   * @return
-   */
-  public HiveIntervalDayTime getScratchIntervalDayTime() {
-    return scratchIntervalDayTime;
-  }
-
-  /**
-   * Compare row to HiveIntervalDayTime.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param elementNum
-   * @param intervalDayTime
-   * @return -1, 0, 1 standard compareTo values.
-   */
-  public int compareTo(int elementNum, HiveIntervalDayTime intervalDayTime) {
-    return asScratchIntervalDayTime(elementNum).compareTo(intervalDayTime);
-  }
-
-  /**
-   * Compare HiveIntervalDayTime to row.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param intervalDayTime
-   * @param elementNum
-   * @return -1, 0, 1 standard compareTo values.
-   */
-  public int compareTo(HiveIntervalDayTime intervalDayTime, int elementNum) {
-    return intervalDayTime.compareTo(asScratchIntervalDayTime(elementNum));
-  }
-
-  /**
-   * Compare a row to another IntervalDayTimeColumnVector's row.
-   * @param elementNum1
-   * @param intervalDayTimeColVector2
-   * @param elementNum2
-   * @return
-   */
-  public int compareTo(int elementNum1, IntervalDayTimeColumnVector intervalDayTimeColVector2,
-      int elementNum2) {
-    return asScratchIntervalDayTime(elementNum1).compareTo(
-        intervalDayTimeColVector2.asScratchIntervalDayTime(elementNum2));
-  }
-
-  /**
-   * Compare another IntervalDayTimeColumnVector's row to a row.
-   * @param intervalDayTimeColVector1
-   * @param elementNum1
-   * @param elementNum2
-   * @return
-   */
-  public int compareTo(IntervalDayTimeColumnVector intervalDayTimeColVector1, int elementNum1,
-      int elementNum2) {
-    return intervalDayTimeColVector1.asScratchIntervalDayTime(elementNum1).compareTo(
-        asScratchIntervalDayTime(elementNum2));
-  }
-
-  @Override
-  public void setElement(int outElementNum, int inputElementNum, ColumnVector inputVector) {
-    // Mirror the repeating/null handling used by the other ColumnVector subtypes.
-    IntervalDayTimeColumnVector input = (IntervalDayTimeColumnVector) inputVector;
-    if (input.isRepeating) {
-      inputElementNum = 0;
-    }
-    if (input.noNulls || !input.isNull[inputElementNum]) {
-      isNull[outElementNum] = false;
-      totalSeconds[outElementNum] = input.totalSeconds[inputElementNum];
-      nanos[outElementNum] = input.nanos[inputElementNum];
-    } else {
-      isNull[outElementNum] = true;
-      noNulls = false;
-    }
-  }
-
-  // Simplify vector by brute-force flattening noNulls and isRepeating
-  // This can be used to reduce combinatorial explosion of code paths in VectorExpressions
-  // with many arguments.
-  public void flatten(boolean selectedInUse, int[] sel, int size) {
-    flattenPush();
-    if (isRepeating) {
-      isRepeating = false;
-      long repeatFastTime = totalSeconds[0];
-      int repeatNanos = nanos[0];
-      if (selectedInUse) {
-        for (int j = 0; j < size; j++) {
-          int i = sel[j];
-          totalSeconds[i] = repeatFastTime;
-          nanos[i] = repeatNanos;
-        }
-      } else {
-        Arrays.fill(totalSeconds, 0, size, repeatFastTime);
-        Arrays.fill(nanos, 0, size, repeatNanos);
-      }
-      flattenRepeatingNulls(selectedInUse, sel, size);
-    }
-    flattenNoNulls(selectedInUse, sel, size);
-  }
-
-  /**
-   * Set a row from a HiveIntervalDayTime.
-   * We assume the entry has already been isRepeated adjusted.
-   * @param elementNum
-   * @param intervalDayTime
-   */
-  public void set(int elementNum, HiveIntervalDayTime intervalDayTime) {
-    this.totalSeconds[elementNum] = intervalDayTime.getTotalSeconds();
-    this.nanos[elementNum] = intervalDayTime.getNanos();
-  }
-
-  /**
-   * Set a row from the current value in the scratch interval day time.
-   * @param elementNum
-   */
-  public void setFromScratchIntervalDayTime(int elementNum) {
-    this.totalSeconds[elementNum] = scratchIntervalDayTime.getTotalSeconds();
-    this.nanos[elementNum] = scratchIntervalDayTime.getNanos();
-  }
-
-  /**
-   * Set row to standard null value(s).
-   * We assume the entry has already been isRepeated adjusted.
-   * @param elementNum
-   */
-  public void setNullValue(int elementNum) {
-    totalSeconds[elementNum] = 0;
-    nanos[elementNum] = 1;
-  }
-
-  // Copy the current object contents into the output. Only copy selected entries,
-  // as indicated by selectedInUse and the sel array.
-  public void copySelected(
-      boolean selectedInUse, int[] sel, int size, IntervalDayTimeColumnVector output) {
-
-    // Output has nulls if and only if input has nulls.
-    output.noNulls = noNulls;
-    output.isRepeating = false;
-
-    // Handle repeating case
-    if (isRepeating) {
-      output.totalSeconds[0] = totalSeconds[0];
-      output.nanos[0] = nanos[0];
-      output.isNull[0] = isNull[0];
-      output.isRepeating = true;
-      return;
-    }
-
-    // Handle normal case
-
-    // Copy data values over
-    if (selectedInUse) {
-      for (int j = 0; j < size; j++) {
-        int i = sel[j];
-        output.totalSeconds[i] = totalSeconds[i];
-        output.nanos[i] = nanos[i];
-      }
-    }
-    else {
-      System.arraycopy(totalSeconds, 0, output.totalSeconds, 0, size);
-      System.arraycopy(nanos, 0, output.nanos, 0, size);
-    }
-
-    // Copy nulls over if needed
-    if (!noNulls) {
-      if (selectedInUse) {
-        for (int j = 0; j < size; j++) {
-          int i = sel[j];
-          output.isNull[i] = isNull[i];
-        }
-      }
-      else {
-        System.arraycopy(isNull, 0, output.isNull, 0, size);
-      }
-    }
-  }
-
-  /**
-   * Fill all the vector entries with a HiveIntervalDayTime.
-   * @param intervalDayTime
-   */
-  public void fill(HiveIntervalDayTime intervalDayTime) {
-    noNulls = true;
-    isRepeating = true;
-    totalSeconds[0] = intervalDayTime.getTotalSeconds();
-    nanos[0] = intervalDayTime.getNanos();
-  }
-
-  /**
-   * Return a convenience writable object stored by this column vector.
-   * Supports keeping a HiveIntervalDayTimeWritable object without having to import that definition...
-   * @return the scratch writable, or null if none has been set
-   */
-  public Writable getScratchWritable() {
-    return scratchWritable;
-  }
-
-  /**
-   * Set the convenience writable object stored by this column vector
-   * @param scratchWritable
-   */
-  public void setScratchWritable(Writable scratchWritable) {
-    this.scratchWritable = scratchWritable;
-  }
-
-  @Override
-  public void stringifyValue(StringBuilder buffer, int row) {
-    if (isRepeating) {
-      row = 0;
-    }
-    if (noNulls || !isNull[row]) {
-      scratchIntervalDayTime.set(totalSeconds[row], nanos[row]);
-      buffer.append(scratchIntervalDayTime.toString());
-    } else {
-      buffer.append("null");
-    }
-  }
-
-  @Override
-  public void ensureSize(int size, boolean preserveData) {
-    super.ensureSize(size, preserveData);
-    if (size <= totalSeconds.length) return;
-    long[] oldTime = totalSeconds;
-    int[] oldNanos = nanos;
-    totalSeconds = new long[size];
-    nanos = new int[size];
-    if (preserveData) {
-      if (isRepeating) {
-        totalSeconds[0] = oldTime[0];
-        nanos[0] = oldNanos[0];
-      } else {
-        System.arraycopy(oldTime, 0, totalSeconds, 0, oldTime.length);
-        System.arraycopy(oldNanos, 0, nanos, 0, oldNanos.length);
-      }
-    }
-  }
-}
\ No newline at end of file
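
A minimal sketch of round-tripping a value through the primitive totalSeconds/nanos storage above (the demo class name and interval values are illustrative; the constructor is assumed to be the days/hours/minutes/seconds/nanos one from Hive's HiveIntervalDayTime):

import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
import org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector;

public class IntervalDayTimeDemo {
  public static void main(String[] args) {
    IntervalDayTimeColumnVector col = new IntervalDayTimeColumnVector();
    col.set(0, new HiveIntervalDayTime(1, 2, 3, 4, 5)); // 1 day, 02:03:04, 5 nanos
    System.out.println(col.asScratchIntervalDayTime(0)); // e.g. "1 02:03:04.000000005"
  }
}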
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/ListColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/ListColumnVector.java
deleted file mode 100644
index 66240dd..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/ListColumnVector.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-/**
- * The representation of a vectorized column of list objects.
- *
- * Each list is composed of a range of elements in the underlying child
- * ColumnVector. The range for list i is
- * offsets[i]..offsets[i]+lengths[i]-1 inclusive.
- */
-public class ListColumnVector extends MultiValuedColumnVector {
-
-  public ColumnVector child;
-
-  public ListColumnVector() {
-    this(VectorizedRowBatch.DEFAULT_SIZE, null);
-  }
-
-  /**
-   * Constructor for ListColumnVector.
-   *
-   * @param len Vector length
-   * @param child The child vector
-   */
-  public ListColumnVector(int len, ColumnVector child) {
-    super(len);
-    this.child = child;
-  }
-
-  @Override
-  protected void childFlatten(boolean useSelected, int[] selected, int size) {
-    child.flatten(useSelected, selected, size);
-  }
-
-  @Override
-  public void setElement(int outElementNum, int inputElementNum,
-                         ColumnVector inputVector) {
-    ListColumnVector input = (ListColumnVector) inputVector;
-    if (input.isRepeating) {
-      inputElementNum = 0;
-    }
-    if (!input.noNulls && input.isNull[inputElementNum]) {
-      isNull[outElementNum] = true;
-      noNulls = false;
-    } else {
-      isNull[outElementNum] = false;
-      int offset = childCount;
-      int length = (int) input.lengths[inputElementNum];
-      int inputOffset = (int) input.offsets[inputElementNum];
-      offsets[outElementNum] = offset;
-      childCount += length;
-      lengths[outElementNum] = length;
-      child.ensureSize(childCount, true);
-      for (int i = 0; i < length; ++i) {
-        child.setElement(i + offset, inputOffset + i, input.child);
-      }
-    }
-  }
-
-  @Override
-  public void stringifyValue(StringBuilder buffer, int row) {
-    if (isRepeating) {
-      row = 0;
-    }
-    if (noNulls || !isNull[row]) {
-      buffer.append('[');
-      boolean isFirst = true;
-      for (long i = offsets[row]; i < offsets[row] + lengths[row]; ++i) {
-        if (isFirst) {
-          isFirst = false;
-        } else {
-          buffer.append(", ");
-        }
-        child.stringifyValue(buffer, (int) i);
-      }
-      buffer.append(']');
-    } else {
-      buffer.append("null");
-    }
-  }
-
-  @Override
-  public void init() {
-    super.init();
-    child.init();
-  }
-
-  @Override
-  public void reset() {
-    super.reset();
-    child.reset();
-  }
-
-  @Override
-  public void unFlatten() {
-    super.unFlatten();
-    if (!isRepeating || noNulls || !isNull[0]) {
-      child.unFlatten();
-    }
-  }
-
-}
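
The offsets/lengths encoding described in the class comment above is inherited from MultiValuedColumnVector. A minimal sketch building the two-row column [[1, 2], [3]] by hand (the demo class name and values are illustrative):

import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

public class ListColumnVectorDemo {
  public static void main(String[] args) {
    LongColumnVector child = new LongColumnVector();
    child.vector[0] = 1;
    child.vector[1] = 2;
    child.vector[2] = 3;
    ListColumnVector col = new ListColumnVector(1024, child);
    col.offsets[0] = 0; col.lengths[0] = 2;  // row 0 covers child[0..1]
    col.offsets[1] = 2; col.lengths[1] = 1;  // row 1 covers child[2]
    StringBuilder sb = new StringBuilder();
    col.stringifyValue(sb, 0);
    System.out.println(sb); // prints [1, 2]
  }
}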
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/LongColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/LongColumnVector.java
deleted file mode 100644
index 80d4731..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/LongColumnVector.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import java.util.Arrays;
-
-/**
- * This class represents a nullable int column vector.
- * This class will be used for operations on all integer types (tinyint, smallint, int, bigint)
- * and as such will use a 64-bit long value to hold the biggest possible value.
- * During copy-in/copy-out, smaller int types will be converted as needed. This will
- * reduce the amount of code that needs to be generated and also will run fast since the
- * machine operates with 64-bit words.
- *
- * The vector[] field is public by design for high-performance access in the inner
- * loop of query execution.
- */
-public class LongColumnVector extends ColumnVector {
-  public long[] vector;
-  public static final long NULL_VALUE = 1;
-
-  /**
-   * Use this constructor by default. All column vectors
-   * should normally be the default size.
-   */
-  public LongColumnVector() {
-    this(VectorizedRowBatch.DEFAULT_SIZE);
-  }
-
-  /**
-   * Don't use this except for testing purposes.
-   *
-   * @param len the number of rows
-   */
-  public LongColumnVector(int len) {
-    super(len);
-    vector = new long[len];
-  }
-
-  // Copy the current object contents into the output. Only copy selected entries,
-  // as indicated by selectedInUse and the sel array.
-  public void copySelected(
-      boolean selectedInUse, int[] sel, int size, LongColumnVector output) {
-
-    // Output has nulls if and only if input has nulls.
-    output.noNulls = noNulls;
-    output.isRepeating = false;
-
-    // Handle repeating case
-    if (isRepeating) {
-      output.vector[0] = vector[0];
-      output.isNull[0] = isNull[0];
-      output.isRepeating = true;
-      return;
-    }
-
-    // Handle normal case
-
-    // Copy data values over
-    if (selectedInUse) {
-      for (int j = 0; j < size; j++) {
-        int i = sel[j];
-        output.vector[i] = vector[i];
-      }
-    }
-    else {
-      System.arraycopy(vector, 0, output.vector, 0, size);
-    }
-
-    // Copy nulls over if needed
-    if (!noNulls) {
-      if (selectedInUse) {
-        for (int j = 0; j < size; j++) {
-          int i = sel[j];
-          output.isNull[i] = isNull[i];
-        }
-      }
-      else {
-        System.arraycopy(isNull, 0, output.isNull, 0, size);
-      }
-    }
-  }
-
-  // Copy the current object contents into the output. Only copy selected entries,
-  // as indicated by selectedInUse and the sel array.
-  public void copySelected(
-      boolean selectedInUse, int[] sel, int size, DoubleColumnVector output) {
-
-    // Output has nulls if and only if input has nulls.
-    output.noNulls = noNulls;
-    output.isRepeating = false;
-
-    // Handle repeating case
-    if (isRepeating) {
-      output.vector[0] = vector[0];  // automatic conversion to double is done here
-      output.isNull[0] = isNull[0];
-      output.isRepeating = true;
-      return;
-    }
-
-    // Handle normal case
-
-    // Copy data values over
-    if (selectedInUse) {
-      for (int j = 0; j < size; j++) {
-        int i = sel[j];
-        output.vector[i] = vector[i];
-      }
-    }
-    else {
-      for(int i = 0; i < size; ++i) {
-        output.vector[i] = vector[i];
-      }
-    }
-
-    // Copy nulls over if needed
-    if (!noNulls) {
-      if (selectedInUse) {
-        for (int j = 0; j < size; j++) {
-          int i = sel[j];
-          output.isNull[i] = isNull[i];
-        }
-      }
-      else {
-        System.arraycopy(isNull, 0, output.isNull, 0, size);
-      }
-    }
-  }
-
-  // Fill the column vector with the provided value
-  public void fill(long value) {
-    noNulls = true;
-    isRepeating = true;
-    vector[0] = value;
-  }
-
-  // Fill the column vector with nulls
-  public void fillWithNulls() {
-    noNulls = false;
-    isRepeating = true;
-    vector[0] = NULL_VALUE;
-    isNull[0] = true;
-  }
-
-  // Simplify vector by brute-force flattening noNulls and isRepeating
-  // This can be used to reduce combinatorial explosion of code paths in VectorExpressions
-  // with many arguments.
-  public void flatten(boolean selectedInUse, int[] sel, int size) {
-    flattenPush();
-    if (isRepeating) {
-      isRepeating = false;
-      long repeatVal = vector[0];
-      if (selectedInUse) {
-        for (int j = 0; j < size; j++) {
-          int i = sel[j];
-          vector[i] = repeatVal;
-        }
-      } else {
-        Arrays.fill(vector, 0, size, repeatVal);
-      }
-      flattenRepeatingNulls(selectedInUse, sel, size);
-    }
-    flattenNoNulls(selectedInUse, sel, size);
-  }
-
-  @Override
-  public void setElement(int outElementNum, int inputElementNum, ColumnVector inputVector) {
-    if (inputVector.isRepeating) {
-      inputElementNum = 0;
-    }
-    if (inputVector.noNulls || !inputVector.isNull[inputElementNum]) {
-      isNull[outElementNum] = false;
-      vector[outElementNum] =
-          ((LongColumnVector) inputVector).vector[inputElementNum];
-    } else {
-      isNull[outElementNum] = true;
-      noNulls = false;
-    }
-  }
-
-  @Override
-  public void stringifyValue(StringBuilder buffer, int row) {
-    if (isRepeating) {
-      row = 0;
-    }
-    if (noNulls || !isNull[row]) {
-      buffer.append(vector[row]);
-    } else {
-      buffer.append("null");
-    }
-  }
-
-  @Override
-  public void ensureSize(int size, boolean preserveData) {
-    super.ensureSize(size, preserveData);
-    if (size > vector.length) {
-      long[] oldArray = vector;
-      vector = new long[size];
-      if (preserveData) {
-        if (isRepeating) {
-          vector[0] = oldArray[0];
-        } else {
-          System.arraycopy(oldArray, 0, vector, 0, oldArray.length);
-        }
-      }
-    }
-  }
-}
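
As a usage sketch of the isRepeating convention implemented above (illustrative only; the class name and values are hypothetical, and it assumes the equivalent LongColumnVector from the released hive-storage-api jar is on the classpath):

import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

// Reading a repeating LongColumnVector: only slot 0 holds data.
public class RepeatingDemo {
  public static void main(String[] args) {
    LongColumnVector col = new LongColumnVector(); // DEFAULT_SIZE entries
    col.fill(42L);                                 // sets isRepeating, vector[0] = 42
    int row = 17;                                  // any logical row index
    int physical = col.isRepeating ? 0 : row;      // repeating maps every row to slot 0
    System.out.println(col.vector[physical]);      // prints 42
  }
}
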
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/MapColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/MapColumnVector.java
deleted file mode 100644
index e8421e3..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/MapColumnVector.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-/**
- * The representation of a vectorized column of map objects.
- *
- * Each map is composed of a range of elements in the underlying child
- * ColumnVector. The range for map i is
- * offsets[i]..offsets[i]+lengths[i]-1 inclusive.
- */
-public class MapColumnVector extends MultiValuedColumnVector {
-
-  public ColumnVector keys;
-  public ColumnVector values;
-
-  public MapColumnVector() {
-    this(VectorizedRowBatch.DEFAULT_SIZE, null, null);
-  }
-
-  /**
-   * Constructor for MapColumnVector
-   *
-   * @param len Vector length
-   * @param keys The keys column vector
-   * @param values The values column vector
-   */
-  public MapColumnVector(int len, ColumnVector keys, ColumnVector values) {
-    super(len);
-    this.keys = keys;
-    this.values = values;
-  }
-
-  @Override
-  protected void childFlatten(boolean useSelected, int[] selected, int size) {
-    keys.flatten(useSelected, selected, size);
-    values.flatten(useSelected, selected, size);
-  }
-
-  @Override
-  public void setElement(int outElementNum, int inputElementNum,
-                         ColumnVector inputVector) {
-    if (inputVector.isRepeating) {
-      inputElementNum = 0;
-    }
-    if (!inputVector.noNulls && inputVector.isNull[inputElementNum]) {
-      isNull[outElementNum] = true;
-      noNulls = false;
-    } else {
-      MapColumnVector input = (MapColumnVector) inputVector;
-      isNull[outElementNum] = false;
-      int offset = childCount;
-      int length = (int) input.lengths[inputElementNum];
-      int inputOffset = (int) input.offsets[inputElementNum];
-      offsets[outElementNum] = offset;
-      childCount += length;
-      lengths[outElementNum] = length;
-      keys.ensureSize(childCount, true);
-      values.ensureSize(childCount, true);
-      for (int i = 0; i < length; ++i) {
-        keys.setElement(i + offset, inputOffset + i, input.keys);
-        values.setElement(i + offset, inputOffset + i, input.values);
-      }
-    }
-  }
-
-  @Override
-  public void stringifyValue(StringBuilder buffer, int row) {
-    if (isRepeating) {
-      row = 0;
-    }
-    if (noNulls || !isNull[row]) {
-      buffer.append('[');
-      boolean isFirst = true;
-      for(long i=offsets[row]; i < offsets[row] + lengths[row]; ++i) {
-        if (isFirst) {
-          isFirst = false;
-        } else {
-          buffer.append(", ");
-        }
-        buffer.append("{\"key\": ");
-        keys.stringifyValue(buffer, (int) i);
-        buffer.append(", \"value\": ");
-        values.stringifyValue(buffer, (int) i);
-        buffer.append('}');
-      }
-      buffer.append(']');
-    } else {
-      buffer.append("null");
-    }
-  }
-
-  @Override
-  public void init() {
-    super.init();
-    keys.init();
-    values.init();
-  }
-
-  @Override
-  public void reset() {
-    super.reset();
-    keys.reset();
-    values.reset();
-  }
-
-  @Override
-  public void unFlatten() {
-    super.unFlatten();
-    if (!isRepeating || noNulls || !isNull[0]) {
-      keys.unFlatten();
-      values.unFlatten();
-    }
-  }
-}
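
The offsets/lengths range convention in the javadoc above can be made concrete with a small hypothetical sketch encoding the two maps {1=10, 2=20} and {3=30}:

import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

public class MapDemo {
  public static void main(String[] args) {
    LongColumnVector keys = new LongColumnVector();
    LongColumnVector values = new LongColumnVector();
    MapColumnVector maps =
        new MapColumnVector(VectorizedRowBatch.DEFAULT_SIZE, keys, values);
    keys.vector[0] = 1; values.vector[0] = 10;  // child rows for map 0
    keys.vector[1] = 2; values.vector[1] = 20;
    keys.vector[2] = 3; values.vector[2] = 30;  // child row for map 1
    maps.offsets[0] = 0; maps.lengths[0] = 2;   // map 0 -> child rows 0..1
    maps.offsets[1] = 2; maps.lengths[1] = 1;   // map 1 -> child row 2
    maps.childCount = 3;
    StringBuilder sb = new StringBuilder();
    maps.stringifyValue(sb, 0);
    System.out.println(sb);  // [{"key": 1, "value": 10}, {"key": 2, "value": 20}]
  }
}
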
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/MultiValuedColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/MultiValuedColumnVector.java
deleted file mode 100644
index 1aeff83..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/MultiValuedColumnVector.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import java.util.Arrays;
-
-/**
- * The representation of a vectorized column of multi-valued objects, such
- * as lists and maps.
- *
- * Each object is composed of a range of elements in the underlying child
- * ColumnVector. The range for list i is
- * offsets[i]..offsets[i]+lengths[i]-1 inclusive.
- */
-public abstract class MultiValuedColumnVector extends ColumnVector {
-
-  public long[] offsets;
-  public long[] lengths;
-  // the number of children slots used
-  public int childCount;
-
-  /**
-   * Constructor for MultiValuedColumnVector.
-   *
-   * @param len Vector length
-   */
-  public MultiValuedColumnVector(int len) {
-    super(len);
-    childCount = 0;
-    offsets = new long[len];
-    lengths = new long[len];
-  }
-
-  protected abstract void childFlatten(boolean useSelected, int[] selected,
-                                       int size);
-
-  @Override
-  public void flatten(boolean selectedInUse, int[] sel, int size) {
-    flattenPush();
-
-    if (isRepeating) {
-      if (noNulls || !isNull[0]) {
-        if (selectedInUse) {
-          for (int i = 0; i < size; ++i) {
-            int row = sel[i];
-            offsets[row] = offsets[0];
-            lengths[row] = lengths[0];
-            isNull[row] = false;
-          }
-        } else {
-          Arrays.fill(offsets, 0, size, offsets[0]);
-          Arrays.fill(lengths, 0, size, lengths[0]);
-          Arrays.fill(isNull, 0, size, false);
-        }
-        // We optimize by assuming that a repeating list/map will run
-        // from 0 .. lengths[0] in the child vector.
-        // Sanity check the assumption that we can start at 0.
-        if (offsets[0] != 0) {
-          throw new IllegalArgumentException("Repeating offset isn't 0, but " +
-                                             offsets[0]);
-        }
-        childFlatten(false, null, (int) lengths[0]);
-      } else {
-        if (selectedInUse) {
-          for(int i=0; i < size; ++i) {
-            isNull[sel[i]] = true;
-          }
-        } else {
-          Arrays.fill(isNull, 0, size, true);
-        }
-      }
-      isRepeating = false;
-      noNulls = false;
-    } else {
-      if (selectedInUse) {
-        int childSize = 0;
-        for(int i=0; i < size; ++i) {
-          childSize += lengths[sel[i]];
-        }
-        int[] childSelection = new int[childSize];
-        int idx = 0;
-        for(int i=0; i < size; ++i) {
-          int row = sel[i];
-          for(int elem=0; elem < lengths[row]; ++elem) {
-            childSelection[idx++] = (int) (offsets[row] + elem);
-          }
-        }
-        childFlatten(true, childSelection, childSize);
-      } else {
-        childFlatten(false, null, childCount);
-      }
-      flattenNoNulls(selectedInUse, sel, size);
-    }
-  }
-
-  @Override
-  public void ensureSize(int size, boolean preserveData) {
-    super.ensureSize(size, preserveData);
-    if (size > offsets.length) {
-      long[] oldOffsets = offsets;
-      offsets = new long[size];
-      long[] oldLengths = lengths;
-      lengths = new long[size];
-      if (preserveData) {
-        if (isRepeating) {
-          offsets[0] = oldOffsets[0];
-          lengths[0] = oldLengths[0];
-        } else {
-          System.arraycopy(oldOffsets, 0, offsets, 0, oldOffsets.length);
-          System.arraycopy(oldLengths, 0, lengths, 0, oldLengths.length);
-        }
-      }
-    }
-  }
-
-  /**
-   * Initialize the vector.
-   */
-  @Override
-  public void init() {
-    super.init();
-    childCount = 0;
-  }
-
-  /**
-   * Reset the vector for the next batch.
-   */
-  @Override
-  public void reset() {
-    super.reset();
-    childCount = 0;
-  }
-
-}
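
The selected-rows branch of flatten() above expands a row selection into a child selection. A self-contained sketch of that index arithmetic, with hypothetical offsets, lengths, and selection:

import java.util.Arrays;

// Three lists occupying child rows 0..1, 2..4, and 5.
public class ChildSelectionDemo {
  public static void main(String[] args) {
    long[] offsets = {0, 2, 5};
    long[] lengths = {2, 3, 1};
    int[] sel = {0, 2};                         // rows 0 and 2 survived a filter
    int childSize = 0;
    for (int row : sel) {
      childSize += lengths[row];
    }
    int[] childSelection = new int[childSize];  // mirror of flatten()'s expansion
    int idx = 0;
    for (int row : sel) {
      for (int elem = 0; elem < lengths[row]; ++elem) {
        childSelection[idx++] = (int) (offsets[row] + elem);
      }
    }
    System.out.println(Arrays.toString(childSelection)); // [0, 1, 5]
  }
}
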
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/StructColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/StructColumnVector.java
deleted file mode 100644
index cf07bca..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/StructColumnVector.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-/**
- * The representation of a vectorized column of struct objects.
- *
- * Each field is represented by a separate inner ColumnVector. Since this
- * ColumnVector doesn't own any per-row data other than the isNull flag, the
- * isRepeating flag only covers the isNull array.
- */
-public class StructColumnVector extends ColumnVector {
-
-  public ColumnVector[] fields;
-
-  public StructColumnVector() {
-    this(VectorizedRowBatch.DEFAULT_SIZE);
-  }
-
-  /**
-   * Constructor for StructColumnVector
-   *
-   * @param len Vector length
-   * @param fields the field column vectors
-   */
-  public StructColumnVector(int len, ColumnVector... fields) {
-    super(len);
-    this.fields = fields;
-  }
-
-  @Override
-  public void flatten(boolean selectedInUse, int[] sel, int size) {
-    flattenPush();
-    for(int i=0; i < fields.length; ++i) {
-      fields[i].flatten(selectedInUse, sel, size);
-    }
-    flattenNoNulls(selectedInUse, sel, size);
-  }
-
-  @Override
-  public void setElement(int outElementNum, int inputElementNum,
-                         ColumnVector inputVector) {
-    if (inputVector.isRepeating) {
-      inputElementNum = 0;
-    }
-    if (inputVector.noNulls || !inputVector.isNull[inputElementNum]) {
-      isNull[outElementNum] = false;
-      ColumnVector[] inputFields = ((StructColumnVector) inputVector).fields;
-      for (int i = 0; i < inputFields.length; ++i) {
-        fields[i].setElement(outElementNum, inputElementNum, inputFields[i]);
-      }
-    } else {
-      noNulls = false;
-      isNull[outElementNum] = true;
-    }
-  }
-
-  @Override
-  public void stringifyValue(StringBuilder buffer, int row) {
-    if (isRepeating) {
-      row = 0;
-    }
-    if (noNulls || !isNull[row]) {
-      buffer.append('[');
-      for(int i=0; i < fields.length; ++i) {
-        if (i != 0) {
-          buffer.append(", ");
-        }
-        fields[i].stringifyValue(buffer, row);
-      }
-      buffer.append(']');
-    } else {
-      buffer.append("null");
-    }
-  }
-
-  @Override
-  public void ensureSize(int size, boolean preserveData) {
-    super.ensureSize(size, preserveData);
-    for(int i=0; i < fields.length; ++i) {
-      fields[i].ensureSize(size, preserveData);
-    }
-  }
-
-  @Override
-  public void reset() {
-    super.reset();
-    for(int i =0; i < fields.length; ++i) {
-      fields[i].reset();
-    }
-  }
-
-  @Override
-  public void init() {
-    super.init();
-    for(int i =0; i < fields.length; ++i) {
-      fields[i].init();
-    }
-  }
-
-  @Override
-  public void unFlatten() {
-    super.unFlatten();
-    for(int i=0; i < fields.length; ++i) {
-      fields[i].unFlatten();
-    }
-  }
-
-  @Override
-  public void setRepeating(boolean isRepeating) {
-    super.setRepeating(isRepeating);
-    for(int i=0; i < fields.length; ++i) {
-      fields[i].setRepeating(isRepeating);
-    }
-  }
-}
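
A brief hypothetical sketch of a struct<a:bigint,b:bigint> column, showing that the struct vector itself stores no values; only its field vectors do:

import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

public class StructDemo {
  public static void main(String[] args) {
    LongColumnVector a = new LongColumnVector();
    LongColumnVector b = new LongColumnVector();
    StructColumnVector s =
        new StructColumnVector(VectorizedRowBatch.DEFAULT_SIZE, a, b);
    a.vector[0] = 1L;   // values live in the field vectors,
    b.vector[0] = 2L;   // not in the struct vector itself
    StringBuilder sb = new StringBuilder();
    s.stringifyValue(sb, 0);
    System.out.println(sb);  // [1, 2]
  }
}
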
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java
deleted file mode 100644
index 28997a0..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java
+++ /dev/null
@@ -1,419 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import java.sql.Timestamp;
-import java.util.Arrays;
-
-import org.apache.hadoop.io.Writable;
-
-/**
- * This class represents a nullable timestamp column vector capable of handling a wide range of
- * timestamp values.
- *
- * We store the 2 (value) fields of a Timestamp class in primitive arrays.
- *
- * We do this to avoid an array of Java Timestamp objects which would have poor storage
- * and memory access characteristics.
- *
- * Generally, the caller will fill in a scratch timestamp object with values from a row, work
- * using the scratch timestamp, and then perhaps update the column vector row with a result.
- */
-public class TimestampColumnVector extends ColumnVector {
-
-  /*
-   * The storage arrays for this column vector correspond to the two value fields of a Timestamp:
-   */
-  public long[] time;
-      // The values from Timestamp.getTime().
-
-  public int[] nanos;
-      // The values from Timestamp.getNanos().
-
-  /*
-   * Scratch objects.
-   */
-  private final Timestamp scratchTimestamp;
-
-  private Writable scratchWritable;
-      // Supports keeping a TimestampWritable object without having to import that definition...
-
-  /**
-   * Use this constructor by default. All column vectors
-   * should normally be the default size.
-   */
-  public TimestampColumnVector() {
-    this(VectorizedRowBatch.DEFAULT_SIZE);
-  }
-
-  /**
-   * Don't use this except for testing purposes.
-   *
-   * @param len the number of rows
-   */
-  public TimestampColumnVector(int len) {
-    super(len);
-
-    time = new long[len];
-    nanos = new int[len];
-
-    scratchTimestamp = new Timestamp(0);
-
-    scratchWritable = null;     // Allocated by caller.
-  }
-
-  /**
-   * Return the number of rows.
-   * @return
-   */
-  public int getLength() {
-    return time.length;
-  }
-
-  /**
-   * Return a row's Timestamp.getTime() value.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param elementNum
-   * @return
-   */
-  public long getTime(int elementNum) {
-    return time[elementNum];
-  }
-
-  /**
-   * Return a row's Timestamp.getNanos() value.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param elementNum
-   * @return
-   */
-  public int getNanos(int elementNum) {
-    return nanos[elementNum];
-  }
-
-  /**
-   * Set a Timestamp object from a row of the column.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param timestamp
-   * @param elementNum
-   */
-  public void timestampUpdate(Timestamp timestamp, int elementNum) {
-    timestamp.setTime(time[elementNum]);
-    timestamp.setNanos(nanos[elementNum]);
-  }
-
-  /**
-   * Return the scratch Timestamp object set from a row.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param elementNum
-   * @return
-   */
-  public Timestamp asScratchTimestamp(int elementNum) {
-    scratchTimestamp.setTime(time[elementNum]);
-    scratchTimestamp.setNanos(nanos[elementNum]);
-    return scratchTimestamp;
-  }
-
-  /**
-   * Return the scratch timestamp (contents undefined).
-   * @return
-   */
-  public Timestamp getScratchTimestamp() {
-    return scratchTimestamp;
-  }
-
-  /**
-   * Return a long representation of a Timestamp.
-   * @param elementNum
-   * @return
-   */
-  public long getTimestampAsLong(int elementNum) {
-    scratchTimestamp.setTime(time[elementNum]);
-    scratchTimestamp.setNanos(nanos[elementNum]);
-    return getTimestampAsLong(scratchTimestamp);
-  }
-
-  /**
-   * Return a long representation of a Timestamp.
-   * @param timestamp
-   * @return
-   */
-  public static long getTimestampAsLong(Timestamp timestamp) {
-    return millisToSeconds(timestamp.getTime());
-  }
-
-  // Copy of TimestampWritable.millisToSeconds
-  /**
-   * Rounds the number of milliseconds relative to the epoch down to the nearest whole number of
-   * seconds. 500 would round to 0, -500 would round to -1.
-   */
-  private static long millisToSeconds(long millis) {
-    if (millis >= 0) {
-      return millis / 1000;
-    } else {
-      return (millis - 999) / 1000;
-    }
-  }
-
-  /**
-   * Return a double representation of a Timestamp.
-   * @param elementNum
-   * @return
-   */
-  public double getDouble(int elementNum) {
-    scratchTimestamp.setTime(time[elementNum]);
-    scratchTimestamp.setNanos(nanos[elementNum]);
-    return getDouble(scratchTimestamp);
-  }
-
-  /**
-   * Return a double representation of a Timestamp.
-   * @param timestamp
-   * @return
-   */
-  public static double getDouble(Timestamp timestamp) {
-    // Same algorithm as TimestampWritable (not currently import-able here).
-    double seconds, nanos;
-    seconds = millisToSeconds(timestamp.getTime());
-    nanos = timestamp.getNanos();
-    return seconds + nanos / 1000000000;
-  }
-
-  /**
-   * Compare row to Timestamp.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param elementNum
-   * @param timestamp
-   * @return -1, 0, 1 standard compareTo values.
-   */
-  public int compareTo(int elementNum, Timestamp timestamp) {
-    return asScratchTimestamp(elementNum).compareTo(timestamp);
-  }
-
-  /**
-   * Compare Timestamp to row.
-   * We assume the entry has already been NULL checked and isRepeated adjusted.
-   * @param timestamp
-   * @param elementNum
-   * @return -1, 0, 1 standard compareTo values.
-   */
-  public int compareTo(Timestamp timestamp, int elementNum) {
-    return timestamp.compareTo(asScratchTimestamp(elementNum));
-  }
-
-  /**
-   * Compare a row to another TimestampColumnVector's row.
-   * @param elementNum1
-   * @param timestampColVector2
-   * @param elementNum2
-   * @return
-   */
-  public int compareTo(int elementNum1, TimestampColumnVector timestampColVector2,
-      int elementNum2) {
-    return asScratchTimestamp(elementNum1).compareTo(
-        timestampColVector2.asScratchTimestamp(elementNum2));
-  }
-
-  /**
-   * Compare another TimestampColumnVector's row to a row.
-   * @param timestampColVector1
-   * @param elementNum1
-   * @param elementNum2
-   * @return
-   */
-  public int compareTo(TimestampColumnVector timestampColVector1, int elementNum1,
-      int elementNum2) {
-    return timestampColVector1.asScratchTimestamp(elementNum1).compareTo(
-        asScratchTimestamp(elementNum2));
-  }
-
-  @Override
-  public void setElement(int outElementNum, int inputElementNum, ColumnVector inputVector) {
-    if (inputVector.isRepeating) {
-      inputElementNum = 0;
-    }
-    if (inputVector.noNulls || !inputVector.isNull[inputElementNum]) {
-      TimestampColumnVector timestampColVector = (TimestampColumnVector) inputVector;
-      isNull[outElementNum] = false;
-      time[outElementNum] = timestampColVector.time[inputElementNum];
-      nanos[outElementNum] = timestampColVector.nanos[inputElementNum];
-    } else {
-      isNull[outElementNum] = true;
-      noNulls = false;
-    }
-  }
-
-  // Simplify vector by brute-force flattening noNulls and isRepeating.
-  // This can be used to reduce combinatorial explosion of code paths in VectorExpressions
-  // with many arguments.
-  public void flatten(boolean selectedInUse, int[] sel, int size) {
-    flattenPush();
-    if (isRepeating) {
-      isRepeating = false;
-      long repeatFastTime = time[0];
-      int repeatNanos = nanos[0];
-      if (selectedInUse) {
-        for (int j = 0; j < size; j++) {
-          int i = sel[j];
-          time[i] = repeatFastTime;
-          nanos[i] = repeatNanos;
-        }
-      } else {
-        Arrays.fill(time, 0, size, repeatFastTime);
-        Arrays.fill(nanos, 0, size, repeatNanos);
-      }
-      flattenRepeatingNulls(selectedInUse, sel, size);
-    }
-    flattenNoNulls(selectedInUse, sel, size);
-  }
-
-  /**
-   * Set a row from a timestamp.
-   * We assume the entry has already been isRepeated adjusted.
-   * @param elementNum
-   * @param timestamp
-   */
-  public void set(int elementNum, Timestamp timestamp) {
-    if (timestamp == null) {
-      this.noNulls = false;
-      this.isNull[elementNum] = true;
-    } else {
-      this.time[elementNum] = timestamp.getTime();
-      this.nanos[elementNum] = timestamp.getNanos();
-    }
-  }
-
-  /**
-   * Set a row from the current value in the scratch timestamp.
-   * @param elementNum
-   */
-  public void setFromScratchTimestamp(int elementNum) {
-    this.time[elementNum] = scratchTimestamp.getTime();
-    this.nanos[elementNum] = scratchTimestamp.getNanos();
-  }
-
-  /**
-   * Set row to standard null value(s).
-   * We assume the entry has already been isRepeated adjusted.
-   * @param elementNum
-   */
-  public void setNullValue(int elementNum) {
-    time[elementNum] = 0;
-    nanos[elementNum] = 1;
-  }
-
-  // Copy the current object contents into the output. Only copy selected entries,
-  // as indicated by selectedInUse and the sel array.
-  public void copySelected(
-      boolean selectedInUse, int[] sel, int size, TimestampColumnVector output) {
-
-    // Output has nulls if and only if input has nulls.
-    output.noNulls = noNulls;
-    output.isRepeating = false;
-
-    // Handle repeating case
-    if (isRepeating) {
-      output.time[0] = time[0];
-      output.nanos[0] = nanos[0];
-      output.isNull[0] = isNull[0];
-      output.isRepeating = true;
-      return;
-    }
-
-    // Handle normal case
-
-    // Copy data values over
-    if (selectedInUse) {
-      for (int j = 0; j < size; j++) {
-        int i = sel[j];
-        output.time[i] = time[i];
-        output.nanos[i] = nanos[i];
-      }
-    }
-    else {
-      System.arraycopy(time, 0, output.time, 0, size);
-      System.arraycopy(nanos, 0, output.nanos, 0, size);
-    }
-
-    // Copy nulls over if needed
-    if (!noNulls) {
-      if (selectedInUse) {
-        for (int j = 0; j < size; j++) {
-          int i = sel[j];
-          output.isNull[i] = isNull[i];
-        }
-      }
-      else {
-        System.arraycopy(isNull, 0, output.isNull, 0, size);
-      }
-    }
-  }
-
-  /**
-   * Fill all the vector entries with a timestamp.
-   * @param timestamp
-   */
-  public void fill(Timestamp timestamp) {
-    noNulls = true;
-    isRepeating = true;
-    time[0] = timestamp.getTime();
-    nanos[0] = timestamp.getNanos();
-  }
-
-  /**
-   * Return a convenience writable object stored by this column vector.
-   * Supports keeping a TimestampWritable object without having to import that definition...
-   * @return
-   */
-  public Writable getScratchWritable() {
-    return scratchWritable;
-  }
-
-  /**
-   * Set the convenience writable object stored by this column vector
-   * @param scratchWritable
-   */
-  public void setScratchWritable(Writable scratchWritable) {
-    this.scratchWritable = scratchWritable;
-  }
-
-  @Override
-  public void stringifyValue(StringBuilder buffer, int row) {
-    if (isRepeating) {
-      row = 0;
-    }
-    if (noNulls || !isNull[row]) {
-      scratchTimestamp.setTime(time[row]);
-      scratchTimestamp.setNanos(nanos[row]);
-      buffer.append(scratchTimestamp.toString());
-    } else {
-      buffer.append("null");
-    }
-  }
-
-  @Override
-  public void ensureSize(int size, boolean preserveData) {
-    super.ensureSize(size, preserveData);
-    if (size <= time.length) return;
-    long[] oldTime = time;
-    int[] oldNanos = nanos;
-    time = new long[size];
-    nanos = new int[size];
-    if (preserveData) {
-      if (isRepeating) {
-        time[0] = oldTime[0];
-        nanos[0] = oldNanos[0];
-      } else {
-        System.arraycopy(oldTime, 0, time, 0, oldTime.length);
-        System.arraycopy(oldNanos, 0, nanos, 0, oldNanos.length);
-      }
-    }
-  }
-}
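
The millisToSeconds() rounding rule documented above is floor division. Since the helper is private, this standalone sketch simply copies its logic to show the behavior on the documented cases:

public class MillisToSecondsDemo {
  static long millisToSeconds(long millis) {
    return millis >= 0 ? millis / 1000 : (millis - 999) / 1000;
  }
  public static void main(String[] args) {
    System.out.println(millisToSeconds(500));   // 0
    System.out.println(millisToSeconds(-500));  // -1  (rounds toward negative infinity)
    System.out.println(millisToSeconds(-1000)); // -1  (exact second, no extra adjustment)
    // Equivalent to Math.floorDiv(millis, 1000L).
  }
}
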
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/UnionColumnVector.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/UnionColumnVector.java
deleted file mode 100644
index 0c61243..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/UnionColumnVector.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-/**
- * The representation of a vectorized column of union objects.
- *
- * Each variant is represented by a separate inner ColumnVector, and the
- * tags array records which variant each row uses. The isRepeating flag
- * only covers the tags and isNull arrays.
- */
-public class UnionColumnVector extends ColumnVector {
-
-  public int[] tags;
-  public ColumnVector[] fields;
-
-  public UnionColumnVector() {
-    this(VectorizedRowBatch.DEFAULT_SIZE);
-  }
-
-  /**
-   * Constructor for UnionColumnVector
-   *
-   * @param len Vector length
-   * @param fields the field column vectors
-   */
-  public UnionColumnVector(int len, ColumnVector... fields) {
-    super(len);
-    tags = new int[len];
-    this.fields = fields;
-  }
-
-  @Override
-  public void flatten(boolean selectedInUse, int[] sel, int size) {
-    flattenPush();
-    for(int i=0; i < fields.length; ++i) {
-      fields[i].flatten(selectedInUse, sel, size);
-    }
-    flattenNoNulls(selectedInUse, sel, size);
-  }
-
-  @Override
-  public void setElement(int outElementNum, int inputElementNum,
-                         ColumnVector inputVector) {
-    if (inputVector.isRepeating) {
-      inputElementNum = 0;
-    }
-    if (inputVector.noNulls || !inputVector.isNull[inputElementNum]) {
-      isNull[outElementNum] = false;
-      UnionColumnVector input = (UnionColumnVector) inputVector;
-      tags[outElementNum] = input.tags[inputElementNum];
-      fields[tags[outElementNum]].setElement(outElementNum, inputElementNum,
-          input.fields[tags[outElementNum]]);
-    } else {
-      noNulls = false;
-      isNull[outElementNum] = true;
-    }
-  }
-
-  @Override
-  public void stringifyValue(StringBuilder buffer, int row) {
-    if (isRepeating) {
-      row = 0;
-    }
-    if (noNulls || !isNull[row]) {
-      buffer.append("{\"tag\": ");
-      buffer.append(tags[row]);
-      buffer.append(", \"value\": ");
-      fields[tags[row]].stringifyValue(buffer, row);
-      buffer.append('}');
-    } else {
-      buffer.append("null");
-    }
-  }
-
-  @Override
-  public void ensureSize(int size, boolean preserveData) {
-    super.ensureSize(size, preserveData);
-    if (tags.length < size) {
-      if (preserveData) {
-        int[] oldTags = tags;
-        tags = new int[size];
-        System.arraycopy(oldTags, 0, tags, 0, oldTags.length);
-      } else {
-        tags = new int[size];
-      }
-      for(int i=0; i < fields.length; ++i) {
-        fields[i].ensureSize(size, preserveData);
-      }
-    }
-  }
-
-  @Override
-  public void reset() {
-    super.reset();
-    for(int i =0; i < fields.length; ++i) {
-      fields[i].reset();
-    }
-  }
-
-  @Override
-  public void init() {
-    super.init();
-    for(int i =0; i < fields.length; ++i) {
-      fields[i].init();
-    }
-  }
-
-  @Override
-  public void unFlatten() {
-    super.unFlatten();
-    for(int i=0; i < fields.length; ++i) {
-      fields[i].unFlatten();
-    }
-  }
-
-  @Override
-  public void setRepeating(boolean isRepeating) {
-    super.setRepeating(isRepeating);
-    for(int i=0; i < fields.length; ++i) {
-      fields[i].setRepeating(isRepeating);
-    }
-  }
-}
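
A hypothetical sketch of a uniontype<bigint,double> column, showing how tags[] selects the child vector for each row (it assumes the analogous DoubleColumnVector from the same package):

import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

public class UnionDemo {
  public static void main(String[] args) {
    LongColumnVector asLong = new LongColumnVector();
    DoubleColumnVector asDouble = new DoubleColumnVector();
    UnionColumnVector u = new UnionColumnVector(
        VectorizedRowBatch.DEFAULT_SIZE, asLong, asDouble);
    u.tags[0] = 0; asLong.vector[0] = 7L;      // row 0 uses the bigint variant
    u.tags[1] = 1; asDouble.vector[1] = 2.5;   // row 1 uses the double variant
    StringBuilder sb = new StringBuilder();
    u.stringifyValue(sb, 1);
    System.out.println(sb);                    // {"tag": 1, "value": 2.5}
  }
}
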
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatch.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatch.java
deleted file mode 100644
index 9c066e0..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatch.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.Writable;
-
-/**
- * A VectorizedRowBatch is a set of rows, organized with each column
- * as a vector. It is the unit of query execution, organized to minimize
- * the cost per row and keep cycles-per-instruction low.
- * The major fields are public by design to allow fast and convenient
- * access by the vectorized query execution code.
- */
-public class VectorizedRowBatch implements Writable {
-  public int numCols;           // number of columns
-  public ColumnVector[] cols;   // a vector for each column
-  public int size;              // number of rows that qualify (i.e. haven't been filtered out)
-  public int[] selected;        // array of positions of selected values
-  public int[] projectedColumns;
-  public int projectionSize;
-
-  private int dataColumnCount;
-  private int partitionColumnCount;
-
-
-  /*
-   * If no filtering has been applied yet, selectedInUse is false,
-   * meaning that all rows qualify. If it is true, then the selected[] array
-   * records the offsets of qualifying rows.
-   */
-  public boolean selectedInUse;
-
-  // If this is true, then there is no data in the batch -- we have hit the end of input.
-  public boolean endOfFile;
-
-  /*
-   * This number is carefully chosen to minimize overhead and typically allows
-   * one VectorizedRowBatch to fit in cache.
-   */
-  public static final int DEFAULT_SIZE = 1024;
-
-  /**
-   * Return a batch with the specified number of columns.
-   * This is the standard constructor -- all batches should be the same size
-   *
-   * @param numCols the number of columns to include in the batch
-   */
-  public VectorizedRowBatch(int numCols) {
-    this(numCols, DEFAULT_SIZE);
-  }
-
-  /**
-   * Return a batch with the specified number of columns and rows.
-   * Only call this constructor directly for testing purposes.
-   * Batch size should normally be DEFAULT_SIZE.
-   *
-   * @param numCols the number of columns to include in the batch
-   * @param size  the number of rows to include in the batch
-   */
-  public VectorizedRowBatch(int numCols, int size) {
-    this.numCols = numCols;
-    this.size = size;
-    selected = new int[size];
-    selectedInUse = false;
-    this.cols = new ColumnVector[numCols];
-    projectedColumns = new int[numCols];
-
-    // Initially all columns are projected and in the same order
-    projectionSize = numCols;
-    for (int i = 0; i < numCols; i++) {
-      projectedColumns[i] = i;
-    }
-
-    dataColumnCount = -1;
-    partitionColumnCount = -1;
-  }
-
-  public void setPartitionInfo(int dataColumnCount, int partitionColumnCount) {
-    this.dataColumnCount = dataColumnCount;
-    this.partitionColumnCount = partitionColumnCount;
-  }
-
-  public int getDataColumnCount() {
-    return dataColumnCount;
-  }
-
-  public int getPartitionColumnCount() {
-    return partitionColumnCount;
-  }
-
-  /**
-   * Returns the maximum size of the batch (number of rows it can hold).
-   */
-  public int getMaxSize() {
-    return selected.length;
-  }
-
-  /**
-   * Return count of qualifying rows.
-   *
-   * @return number of rows that have not been filtered out
-   */
-  public long count() {
-    return size;
-  }
-
-  private static String toUTF8(Object o) {
-    if(o == null || o instanceof NullWritable) {
-      return "\\N"; /* as found in LazySimpleSerDe's nullSequence */
-    }
-    return o.toString();
-  }
-
-  @Override
-  public String toString() {
-    if (size == 0) {
-      return "";
-    }
-    StringBuilder b = new StringBuilder();
-    if (this.selectedInUse) {
-      for (int j = 0; j < size; j++) {
-        int i = selected[j];
-        b.append('[');
-        for (int k = 0; k < projectionSize; k++) {
-          int projIndex = projectedColumns[k];
-          ColumnVector cv = cols[projIndex];
-          if (k > 0) {
-            b.append(", ");
-          }
-          cv.stringifyValue(b, i);
-        }
-        b.append(']');
-        if (j < size - 1) {
-          b.append('\n');
-        }
-      }
-    } else {
-      for (int i = 0; i < size; i++) {
-        b.append('[');
-        for (int k = 0; k < projectionSize; k++) {
-          int projIndex = projectedColumns[k];
-          ColumnVector cv = cols[projIndex];
-          if (k > 0) {
-            b.append(", ");
-          }
-          if (cv != null) {
-            cv.stringifyValue(b, i);
-          }
-        }
-        b.append(']');
-        if (i < size - 1) {
-          b.append('\n');
-        }
-      }
-    }
-    return b.toString();
-  }
-
-  @Override
-  public void readFields(DataInput arg0) throws IOException {
-    throw new UnsupportedOperationException("Do you really need me?");
-  }
-
-  @Override
-  public void write(DataOutput arg0) throws IOException {
-    throw new UnsupportedOperationException("Don't call me");
-  }
-
-  /**
-   * Resets the row batch to default state
-   *  - sets selectedInUse to false
-   *  - sets size to 0
-   *  - sets endOfFile to false
-   *  - resets each column
-   *  - inits each column
-   */
-  public void reset() {
-    selectedInUse = false;
-    size = 0;
-    endOfFile = false;
-    for (ColumnVector vc : cols) {
-      if (vc != null) {
-        vc.reset();
-        vc.init();
-      }
-    }
-  }
-
-  /**
-   * Set the maximum number of rows in the batch.
-   * Data is not preserved.
-   */
-  public void ensureSize(int rows) {
-    for(int i=0; i < cols.length; ++i) {
-      cols[i].ensureSize(rows, false);
-    }
-  }
-}
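
The selectedInUse contract documented above implies the canonical scan loop over a batch. A hypothetical sketch (column values and selection are illustrative):

import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

public class BatchScanDemo {
  public static void main(String[] args) {
    VectorizedRowBatch batch = new VectorizedRowBatch(1);
    LongColumnVector col = new LongColumnVector();
    batch.cols[0] = col;
    for (int i = 0; i < 4; i++) {
      col.vector[i] = i * 10L;
    }
    batch.size = 2;
    batch.selectedInUse = true;   // a filter kept rows 1 and 3
    batch.selected[0] = 1;
    batch.selected[1] = 3;
    for (int j = 0; j < batch.size; j++) {
      int row = batch.selectedInUse ? batch.selected[j] : j;
      System.out.println(col.vector[row]);  // 10, then 30
    }
  }
}
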
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java
deleted file mode 100644
index 90817a5..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector.expressions;
-
-import java.util.Arrays;
-
-import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
-
-/**
- * String expression evaluation helper functions.
- */
-public class StringExpr {
-
-  /* Compare two strings from two byte arrays each
-   * with their own start position and length.
-   * Use lexicographic unsigned byte value order.
-   * This is what's used for UTF-8 sort order.
-   * Return negative value if arg1 < arg2, 0 if arg1 = arg2,
-   * positive if arg1 > arg2.
-   */
-  public static int compare(byte[] arg1, int start1, int len1, byte[] arg2, int start2, int len2) {
-    for (int i = 0; i < len1 && i < len2; i++) {
-      // Note the "& 0xff" converts a signed byte to its unsigned integer value.
-      int b1 = arg1[i + start1] & 0xff;
-      int b2 = arg2[i + start2] & 0xff;
-      if (b1 != b2) {
-        return b1 - b2;
-      }
-    }
-    return len1 - len2;
-  }
-
-  /* Determine if two strings are equal from two byte arrays each
-   * with their own start position and length.
-   * Use lexicographic unsigned byte value order.
-   * This is what's used for UTF-8 sort order.
-   */
-  public static boolean equal(byte[] arg1, final int start1, final int len1,
-      byte[] arg2, final int start2, final int len2) {
-    if (len1 != len2) {
-      return false;
-    }
-    if (len1 == 0) {
-      return true;
-    }
-
-    // do bounds check for OOB exception
-    if (arg1[start1] != arg2[start2]
-        || arg1[start1 + len1 - 1] != arg2[start2 + len2 - 1]) {
-      return false;
-    }
-
-    if (len1 == len2) {
-      // prove the invariant to the compiler: len1 == len2, so
-      // all array accesses in [start1, start1+len1) and
-      // [start2, start2+len2) are valid; no more OOB exceptions are possible
-      final int step = 8;
-      final int remainder = len1 % step;
-      final int wlen = len1 - remainder;
-      // suffix first
-      for (int i = wlen; i < len1; i++) {
-        if (arg1[start1 + i] != arg2[start2 + i]) {
-          return false;
-        }
-      }
-      // SIMD loop
-      for (int i = 0; i < wlen; i += step) {
-        final int s1 = start1 + i;
-        final int s2 = start2 + i;
-        boolean neq = false;
-        for (int j = 0; j < step; j++) {
-          neq = (arg1[s1 + j] != arg2[s2 + j]) || neq;
-        }
-        if (neq) {
-          return false;
-        }
-      }
-    }
-
-    return true;
-  }
-
-  public static int characterCount(byte[] bytes) {
-    int end = bytes.length;
-
-    // count characters
-    int j = 0;
-    int charCount = 0;
-    while(j < end) {
-      // UTF-8 continuation bytes have 2 high bits equal to 0x80.
-      if ((bytes[j] & 0xc0) != 0x80) {
-        ++charCount;
-      }
-      j++;
-    }
-    return charCount;
-  }
-
-  public static int characterCount(byte[] bytes, int start, int length) {
-    int end = start + length;
-
-    // count characters
-    int j = start;
-    int charCount = 0;
-    while(j < end) {
-      // UTF-8 continuation bytes have 2 high bits equal to 0x80.
-      if ((bytes[j] & 0xc0) != 0x80) {
-        ++charCount;
-      }
-      j++;
-    }
-    return charCount;
-  }
-
-  // A setVal with the same function signature as rightTrim, leftTrim, truncate, etc, below.
-  // Useful for class generation via templates.
-  public static void assign(BytesColumnVector outV, int i, byte[] bytes, int start, int length) {
-    // set output vector
-    outV.setVal(i, bytes, start, length);
-  }
-
-  /*
-   * Right trim a slice of a byte array and return the new byte length.
-   */
-  public static int rightTrim(byte[] bytes, int start, int length) {
-    // skip trailing blank characters
-    int j = start + length - 1;
-    while(j >= start && bytes[j] == 0x20) {
-      j--;
-    }
-
-    return (j - start) + 1;
-  }
-
-  /*
-   * Right trim a slice of a byte array and place the result into element i of a vector.
-   */
-  public static void rightTrim(BytesColumnVector outV, int i, byte[] bytes, int start, int length) {
-    // skip trailing blank characters
-    int j = start + length - 1;
-    while(j >= start && bytes[j] == 0x20) {
-      j--;
-    }
-
-    // set output vector
-    outV.setVal(i, bytes, start, (j - start) + 1);
-  }
-
-  /*
-   * Truncate a slice of a byte array to a maximum number of characters and
-   * return the new byte length.
-   */
-  public static int truncate(byte[] bytes, int start, int length, int maxLength) {
-    int end = start + length;
-
-    // count characters forward
-    int j = start;
-    int charCount = 0;
-    while(j < end) {
-      // UTF-8 continuation bytes have 2 high bits equal to 0x80.
-      if ((bytes[j] & 0xc0) != 0x80) {
-        if (charCount == maxLength) {
-          break;
-        }
-        ++charCount;
-      }
-      j++;
-    }
-    return (j - start);
-  }
-
-  /*
-   * Truncate a slice of a byte array to a maximum number of characters and
-   * place the result into element i of a vector.
-   */
-  public static void truncate(BytesColumnVector outV, int i, byte[] bytes, int start, int length, int maxLength) {
-    int end = start + length;
-
-    // count characters forward
-    int j = start;
-    int charCount = 0;
-    while(j < end) {
-      // UTF-8 continuation bytes have 2 high bits equal to 0x80.
-      if ((bytes[j] & 0xc0) != 0x80) {
-        if (charCount == maxLength) {
-          break;
-        }
-        ++charCount;
-      }
-      j++;
-    }
-
-    // set output vector
-    outV.setVal(i, bytes, start, (j - start));
-  }
-
-  /*
-   * Truncate a byte array to a maximum number of characters and
-   * return a byte array containing only the kept bytes.
-   */
-  public static byte[] truncateScalar(byte[] bytes, int maxLength) {
-    int end = bytes.length;
-
-    // count characters forward
-    int j = 0;
-    int charCount = 0;
-    while(j < end) {
-      // UTF-8 continuation bytes have 2 high bits equal to 0x80.
-      if ((bytes[j] & 0xc0) != 0x80) {
-        if (charCount == maxLength) {
-          break;
-        }
-        ++charCount;
-      }
-      j++;
-    }
-    if (j == end) {
-      return bytes;
-    } else {
-      return Arrays.copyOf(bytes, j);
-    }
-  }
-
-  /*
-   * Right trim and truncate a slice of a byte array to a maximum number of characters and
-   * return the new byte length.
-   */
-  public static int rightTrimAndTruncate(byte[] bytes, int start, int length, int maxLength) {
-    int end = start + length;
-
-    // count characters forward and watch for final run of pads
-    int j = start;
-    int charCount = 0;
-    int padRunStart = -1;
-    while(j < end) {
-      // UTF-8 continuation bytes have 2 high bits equal to 0x80.
-      if ((bytes[j] & 0xc0) != 0x80) {
-        if (charCount == maxLength) {
-          break;
-        }
-        if (bytes[j] == 0x20) {
-          if (padRunStart == -1) {
-            padRunStart = j;
-          }
-        } else {
-          padRunStart = -1;
-        }
-        ++charCount;
-      } else {
-        padRunStart = -1;
-      }
-      j++;
-    }
-    if (padRunStart != -1) {
-      return (padRunStart - start);
-    } else {
-      return (j - start);
-    }
-  }
-
-  /*
-   * Right trim and truncate a slice of a byte array to a maximum number of characters and
-   * place the result into element i of a vector.
-   */
-  public static void rightTrimAndTruncate(BytesColumnVector outV, int i, byte[] bytes, int start, int length, int maxLength) {
-    int end = start + length;
-
-    // count characters forward and watch for final run of pads
-    int j = start;
-    int charCount = 0;
-    int padRunStart = -1;
-    while(j < end) {
-      // UTF-8 continuation bytes have 2 high bits equal to 0x80.
-      if ((bytes[j] & 0xc0) != 0x80) {
-        if (charCount == maxLength) {
-          break;
-        }
-        if (bytes[j] == 0x20) {
-          if (padRunStart == -1) {
-            padRunStart = j;
-          }
-        } else {
-          padRunStart = -1;
-        }
-        ++charCount;
-      } else {
-        padRunStart = -1;
-      }
-      j++;
-    }
-    // set output vector
-    if (padRunStart != -1) {
-      outV.setVal(i, bytes, start, (padRunStart - start));
-    } else {
-      outV.setVal(i, bytes, start, (j - start));
-    }
-  }
-
-  /*
-   * Right trim and truncate a byte array to a maximum number of characters and
-   * return a byte array with only the trimmed and truncated bytes.
-   */
-  public static byte[] rightTrimAndTruncateScalar(byte[] bytes, int maxLength) {
-    int end = bytes.length;
-
-    // count characters forward and watch for final run of pads
-    int j = 0;
-    int charCount = 0;
-    int padRunStart = -1;
-    while(j < end) {
-      // UTF-8 continuation bytes have 2 high bits equal to 0x80.
-      if ((bytes[j] & 0xc0) != 0x80) {
-        if (charCount == maxLength) {
-          break;
-        }
-        if (bytes[j] == 0x20) {
-          if (padRunStart == -1) {
-            padRunStart = j;
-          }
-        } else {
-          padRunStart = -1;
-        }
-        ++charCount;
-      } else {
-        padRunStart = -1;
-      }
-      j++;
-    }
-    if (padRunStart != -1) {
-      return Arrays.copyOf(bytes, padRunStart);
-    } else if (j == end) {
-      return bytes;
-    } else {
-      return Arrays.copyOf(bytes, j);
-    }
-  }
-}
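
A hypothetical sketch exercising the UTF-8-aware helpers above; the byte counts assume UTF-8 encoding:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;

public class StringExprDemo {
  public static void main(String[] args) {
    byte[] s = "héllo".getBytes(StandardCharsets.UTF_8);   // 6 bytes, 5 characters
    System.out.println(StringExpr.characterCount(s));      // 5
    // truncate() counts characters, so the 2-byte 'é' is kept whole:
    System.out.println(StringExpr.truncate(s, 0, s.length, 2)); // 3 bytes ("hé")
    byte[] a = "apple".getBytes(StandardCharsets.UTF_8);
    byte[] b = "banana".getBytes(StandardCharsets.UTF_8);
    // Lexicographic unsigned-byte order, matching UTF-8 sort order:
    System.out.println(StringExpr.compare(a, 0, a.length, b, 0, b.length) < 0); // true
  }
}
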
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/ExpressionTree.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/ExpressionTree.java
deleted file mode 100644
index 443083d..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/ExpressionTree.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.io.sarg;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * The inner representation of the SearchArgument. Most users should not
- * need this interface; it is only for file formats that need to translate
- * the SearchArgument into an internal form.
- */
-public class ExpressionTree {
-  public enum Operator {OR, AND, NOT, LEAF, CONSTANT}
-  private final Operator operator;
-  private final List<ExpressionTree> children;
-  private int leaf;
-  private final SearchArgument.TruthValue constant;
-
-  ExpressionTree() {
-    operator = null;
-    children = null;
-    leaf = 0;
-    constant = null;
-  }
-
-  ExpressionTree(Operator op, ExpressionTree... kids) {
-    operator = op;
-    children = new ArrayList<ExpressionTree>();
-    leaf = -1;
-    this.constant = null;
-    Collections.addAll(children, kids);
-  }
-
-  ExpressionTree(int leaf) {
-    operator = Operator.LEAF;
-    children = null;
-    this.leaf = leaf;
-    this.constant = null;
-  }
-
-  ExpressionTree(SearchArgument.TruthValue constant) {
-    operator = Operator.CONSTANT;
-    children = null;
-    this.leaf = -1;
-    this.constant = constant;
-  }
-
-  ExpressionTree(ExpressionTree other) {
-    this.operator = other.operator;
-    if (other.children == null) {
-      this.children = null;
-    } else {
-      this.children = new ArrayList<ExpressionTree>();
-      for(ExpressionTree child: other.children) {
-        children.add(new ExpressionTree(child));
-      }
-    }
-    this.leaf = other.leaf;
-    this.constant = other.constant;
-  }
-
-  public SearchArgument.TruthValue evaluate(SearchArgument.TruthValue[] leaves) {
-    SearchArgument.TruthValue result = null;
-    switch (operator) {
-      case OR:
-        for(ExpressionTree child: children) {
-          result = child.evaluate(leaves).or(result);
-        }
-        return result;
-      case AND:
-        for(ExpressionTree child: children) {
-          result = child.evaluate(leaves).and(result);
-        }
-        return result;
-      case NOT:
-        return children.get(0).evaluate(leaves).not();
-      case LEAF:
-        return leaves[leaf];
-      case CONSTANT:
-        return constant;
-      default:
-        throw new IllegalStateException("Unknown operator: " + operator);
-    }
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder buffer = new StringBuilder();
-    switch (operator) {
-      case OR:
-        buffer.append("(or");
-        for(ExpressionTree child: children) {
-          buffer.append(' ');
-          buffer.append(child.toString());
-        }
-        buffer.append(')');
-        break;
-      case AND:
-        buffer.append("(and");
-        for(ExpressionTree child: children) {
-          buffer.append(' ');
-          buffer.append(child.toString());
-        }
-        buffer.append(')');
-        break;
-      case NOT:
-        buffer.append("(not ");
-        buffer.append(children.get(0));
-        buffer.append(')');
-        break;
-      case LEAF:
-        buffer.append("leaf-");
-        buffer.append(leaf);
-        break;
-      case CONSTANT:
-        buffer.append(constant);
-        break;
-    }
-    return buffer.toString();
-  }
-
-  public Operator getOperator() {
-    return operator;
-  }
-
-  public List<ExpressionTree> getChildren() {
-    return children;
-  }
-
-  public SearchArgument.TruthValue getConstant() {
-    return constant;
-  }
-
-  public int getLeaf() {
-    return leaf;
-  }
-
-  public void setLeaf(int leaf) {
-    this.leaf = leaf;
-  }
-}
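
Because the constructors above are package-private, any sketch must sit in the same package. Under that assumption, evaluating (and leaf-0 (not leaf-1)) against known leaf truth values looks like this (class name hypothetical):

package org.apache.hadoop.hive.ql.io.sarg;

public class ExpressionTreeDemo {
  public static void main(String[] args) {
    // (and leaf-0 (not leaf-1))
    ExpressionTree expr = new ExpressionTree(
        ExpressionTree.Operator.AND,
        new ExpressionTree(0),
        new ExpressionTree(ExpressionTree.Operator.NOT,
                           new ExpressionTree(1)));
    SearchArgument.TruthValue[] leaves = {
        SearchArgument.TruthValue.YES,   // leaf-0, as evaluated for a row group
        SearchArgument.TruthValue.NO };  // leaf-1
    System.out.println(expr);                  // (and leaf-0 (not leaf-1))
    System.out.println(expr.evaluate(leaves)); // YES
  }
}
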
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java
deleted file mode 100644
index 469a3da..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.io.sarg;
-
-import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
-
-import java.sql.Date;
-import java.sql.Timestamp;
-import java.util.List;
-
-/**
- * The primitive predicates that form a SearchArgument.
- */
-public interface PredicateLeaf {
-
-  /**
-   * The possible operators for predicates. To get the opposites, construct
-   * an expression with a not operator.
-   */
-  public static enum Operator {
-    EQUALS,
-    NULL_SAFE_EQUALS,
-    LESS_THAN,
-    LESS_THAN_EQUALS,
-    IN,
-    BETWEEN,
-    IS_NULL
-  }
-
-  /**
-   * The possible types for sargs.
-   */
-  public static enum Type {
-    LONG(Long.class),      // all of the integer types
-    FLOAT(Double.class),   // float and double
-    STRING(String.class),  // string, char, varchar
-    DATE(Date.class),
-    DECIMAL(HiveDecimalWritable.class),
-    TIMESTAMP(Timestamp.class),
-    BOOLEAN(Boolean.class);
-
-    private final Class cls;
-    Type(Class cls) {
-      this.cls = cls;
-    }
-
-    /**
-     * For all SARG leaves, the values must be the matching class.
-     * @return the value class
-     */
-    public Class getValueClass() {
-      return cls;
-    }
-  }
-
-  /**
-   * Get the operator for the leaf.
-   */
-  public Operator getOperator();
-
-  /**
-   * Get the type of the column and literal, as interpreted by the file format.
-   */
-  public Type getType();
-
-  /**
-   * Get the simple column name.
-   * @return the column name
-   */
-  public String getColumnName();
-
-  /**
-   * Get the literal half of the predicate leaf. The original type is adapted to what ORC needs.
-   *
-   * @return an Integer, Long, Double, or String
-   */
-  public Object getLiteral();
-
-  /**
-   * For operators with multiple literals (IN and BETWEEN), get the literals.
-   *
-   * @return the list of literals (Integers, Longs, Doubles, or Strings)
-   */
-  public List<Object> getLiteralList();
-}
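
A quick illustration of the value-class contract above (a sketch; real leaves are normally created through SearchArgumentFactory rather than directly):

    // Each Type pins the exact literal class; all integer columns use Long.
    PredicateLeaf.Type t = PredicateLeaf.Type.LONG;
    Object good = Long.valueOf(42L);    // matches t.getValueClass()
    Object bad = Integer.valueOf(42);   // rejected by the implementation's check
    assert good.getClass() == t.getValueClass();
    assert bad.getClass() != t.getValueClass();
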
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java
deleted file mode 100644
index d70b3b0..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java
+++ /dev/null
@@ -1,291 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.io.sarg;
-
-import java.util.List;
-
-/**
- * Primary interface for <a href="http://en.wikipedia.org/wiki/Sargable">
- *   SearchArgument</a>, which is the subset of predicates
- * that can be pushed down to the RecordReader. Each SearchArgument consists
- * of a series of SearchClauses that must each be true for the row to be
- * accepted by the filter.
- *
- * This requires that the filter be normalized into conjunctive normal form
- * (<a href="http://en.wikipedia.org/wiki/Conjunctive_normal_form">CNF</a>).
- */
-public interface SearchArgument {
-
-  /**
-   * The potential result sets of logical operations.
-   */
-  public static enum TruthValue {
-    YES, NO, NULL, YES_NULL, NO_NULL, YES_NO, YES_NO_NULL;
-
-    /**
-     * Compute the logical OR of the two values.
-     * @param right the other argument or null
-     * @return the result
-     */
-    public TruthValue or(TruthValue right) {
-      if (right == null || right == this) {
-        return this;
-      }
-      if (right == YES || this == YES) {
-        return YES;
-      }
-      if (right == YES_NULL || this == YES_NULL) {
-        return YES_NULL;
-      }
-      if (right == NO) {
-        return this;
-      }
-      if (this == NO) {
-        return right;
-      }
-      if (this == NULL) {
-        if (right == NO_NULL) {
-          return NULL;
-        } else {
-          return YES_NULL;
-        }
-      }
-      if (right == NULL) {
-        if (this == NO_NULL) {
-          return NULL;
-        } else {
-          return YES_NULL;
-        }
-      }
-      return YES_NO_NULL;
-    }
-
-    /**
-     * Compute the logical AND of the two values.
-     * @param right the other argument or null
-     * @return the result
-     */
-    public TruthValue and(TruthValue right) {
-      if (right == null || right == this) {
-        return this;
-      }
-      if (right == NO || this == NO) {
-        return NO;
-      }
-      if (right == NO_NULL || this == NO_NULL) {
-        return NO_NULL;
-      }
-      if (right == YES) {
-        return this;
-      }
-      if (this == YES) {
-        return right;
-      }
-      if (this == NULL) {
-        if (right == YES_NULL) {
-          return NULL;
-        } else {
-          return NO_NULL;
-        }
-      }
-      if (right == NULL) {
-        if (this == YES_NULL) {
-          return NULL;
-        } else {
-          return NO_NULL;
-        }
-      }
-      return YES_NO_NULL;
-    }
-
-    /**
-     * Compute the logical negation of this value.
-     * @return the negated truth value
-     */
-    public TruthValue not() {
-      switch (this) {
-        case NO:
-          return YES;
-        case YES:
-          return NO;
-        case NULL:
-        case YES_NO:
-        case YES_NO_NULL:
-          return this;
-        case NO_NULL:
-          return YES_NULL;
-        case YES_NULL:
-          return NO_NULL;
-        default:
-          throw new IllegalArgumentException("Unknown value: " + this);
-      }
-    }
-
-    /**
-     * Does the RecordReader need to include this set of records?
-     * @return true unless none of the rows qualify
-     */
-    public boolean isNeeded() {
-      switch (this) {
-        case NO:
-        case NULL:
-        case NO_NULL:
-          return false;
-        default:
-          return true;
-      }
-    }
-  }
-
-  /**
-   * Get the leaf predicates that are required to evaluate the predicate. The
-   * list will have the duplicates removed.
-   * @return the list of leaf predicates
-   */
-  public List<PredicateLeaf> getLeaves();
-
-  /**
-   * Get the expression tree. This should only be needed for file formats that
-   * need to translate the expression to an internal form.
-   */
-  public ExpressionTree getExpression();
-
-  /**
-   * Evaluate the entire predicate based on the values for the leaf predicates.
-   * @param leaves the value of each leaf predicate
-   * @return the value of the entire predicate
-   */
-  public TruthValue evaluate(TruthValue[] leaves);
-
-  /**
-   * A builder object for contexts outside of Hive where it isn't easy to
-   * get an ExprNodeDesc. The user must call startOr, startAnd, or startNot
-   * before adding any leaves.
-   */
-  public interface Builder {
-
-    /**
-     * Start building an or operation and push it on the stack.
-     * @return this
-     */
-    public Builder startOr();
-
-    /**
-     * Start building an and operation and push it on the stack.
-     * @return this
-     */
-    public Builder startAnd();
-
-    /**
-     * Start building a not operation and push it on the stack.
-     * @return this
-     */
-    public Builder startNot();
-
-    /**
-     * Finish the current operation and pop it off of the stack. Each start
-     * call must have a matching end.
-     * @return this
-     */
-    public Builder end();
-
-    /**
-     * Add a less than leaf to the current item on the stack.
-     * @param column the name of the column
-     * @param type the type of the expression
-     * @param literal the literal
-     * @return this
-     */
-    public Builder lessThan(String column, PredicateLeaf.Type type,
-                            Object literal);
-
-    /**
-     * Add a less than equals leaf to the current item on the stack.
-     * @param column the name of the column
-     * @param type the type of the expression
-     * @param literal the literal
-     * @return this
-     */
-    public Builder lessThanEquals(String column, PredicateLeaf.Type type,
-                                  Object literal);
-
-    /**
-     * Add an equals leaf to the current item on the stack.
-     * @param column the name of the column
-     * @param type the type of the expression
-     * @param literal the literal
-     * @return this
-     */
-    public Builder equals(String column, PredicateLeaf.Type type,
-                          Object literal);
-
-    /**
-     * Add a null safe equals leaf to the current item on the stack.
-     * @param column the name of the column
-     * @param type the type of the expression
-     * @param literal the literal
-     * @return this
-     */
-    public Builder nullSafeEquals(String column, PredicateLeaf.Type type,
-                                  Object literal);
-
-    /**
-     * Add an in leaf to the current item on the stack.
-     * @param column the name of the column
-     * @param type the type of the expression
-     * @param literal the literals that make up the IN set
-     * @return this
-     */
-    public Builder in(String column, PredicateLeaf.Type type,
-                      Object... literal);
-
-    /**
-     * Add an is null leaf to the current item on the stack.
-     * @param column the name of the column
-     * @param type the type of the expression
-     * @return this
-     */
-    public Builder isNull(String column, PredicateLeaf.Type type);
-
-    /**
-     * Add a between leaf to the current item on the stack.
-     * @param column the name of the column
-     * @param type the type of the expression
-     * @param lower the lower bound of the range
-     * @param upper the upper bound of the range
-     * @return this
-     */
-    public Builder between(String column, PredicateLeaf.Type type,
-                           Object lower, Object upper);
-
-    /**
-     * Add a truth value to the expression.
-     * @param truth the truth value to add
-     * @return this
-     */
-    public Builder literal(TruthValue truth);
-
-    /**
-     * Build and return the SearchArgument that has been defined. All of the
-     * starts must have been ended before this call.
-     * @return the new SearchArgument
-     */
-    public SearchArgument build();
-  }
-}
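
The TruthValue operations above implement three-valued logic extended with uncertainty; a small sketch of how they compose with evaluate (the sarg variable is hypothetical):

    import static org.apache.hadoop.hive.ql.io.sarg.SearchArgument.TruthValue.*;

    // Per the or/and/not definitions above:
    assert YES.or(NULL) == YES;         // true OR unknown is true
    assert NO.and(NULL) == NO;          // false AND unknown is false
    assert YES_NULL.not() == NO_NULL;   // negation swaps the certain part
    assert !NO_NULL.isNeeded();         // only-false-or-null row groups are skipped

    // A reader computes one TruthValue per leaf from its statistics, then:
    //   if (!sarg.evaluate(leafValues).isNeeded()) { /* skip the row group */ }
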
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java
deleted file mode 100644
index 8fda95c..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.io.sarg;
-
-/**
- * A factory for creating SearchArguments, as well as modifying those created by this factory.
- */
-public class SearchArgumentFactory {
-  public static SearchArgument.Builder newBuilder() {
-    return new SearchArgumentImpl.BuilderImpl();
-  }
-  public static void setPredicateLeafColumn(PredicateLeaf leaf, String newName) {
-    SearchArgumentImpl.PredicateLeafImpl.setColumnName(leaf, newName);
-  }
-}
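
A minimal usage sketch for the factory and Builder (hypothetical column names and literals):

    // Build: x < 10 AND y = 'hello'
    SearchArgument sarg = SearchArgumentFactory.newBuilder()
        .startAnd()
          .lessThan("x", PredicateLeaf.Type.LONG, 10L)
          .equals("y", PredicateLeaf.Type.STRING, "hello")
        .end()
        .build();
    assert sarg.getLeaves().size() == 2;  // both leaves survive normalization
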
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
deleted file mode 100644
index 10d8c51..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
+++ /dev/null
@@ -1,702 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.io.sarg;
-
-import java.sql.Timestamp;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Deque;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-
-/**
- * The implementation of SearchArguments. Visible for testing only.
- */
-public final class SearchArgumentImpl implements SearchArgument {
-
-  public static final class PredicateLeafImpl implements PredicateLeaf {
-    private final Operator operator;
-    private final Type type;
-    private String columnName;
-    private final Object literal;
-    private final List<Object> literalList;
-
-    // Used by kryo
-    @SuppressWarnings("unused")
-    PredicateLeafImpl() {
-      operator = null;
-      type = null;
-      columnName = null;
-      literal = null;
-      literalList = null;
-    }
-
-    public PredicateLeafImpl(Operator operator,
-                             Type type,
-                             String columnName,
-                             Object literal,
-                             List<Object> literalList) {
-      this.operator = operator;
-      this.type = type;
-      this.columnName = columnName;
-      this.literal = literal;
-      if (literal != null) {
-        if (literal.getClass() != type.getValueClass()) {
-          throw new IllegalArgumentException("Wrong value class " +
-              literal.getClass().getName() + " for " + type + "." + operator +
-              " leaf");
-        }
-      }
-      this.literalList = literalList;
-      if (literalList != null) {
-        Class valueCls = type.getValueClass();
-        for(Object lit: literalList) {
-          if (lit != null && lit.getClass() != valueCls) {
-            throw new IllegalArgumentException("Wrong value class item " +
-                lit.getClass().getName() + " for " + type + "." + operator +
-                " leaf");
-          }
-        }
-      }
-    }
-
-    @Override
-    public Operator getOperator() {
-      return operator;
-    }
-
-    @Override
-    public Type getType() {
-      return type;
-    }
-
-    @Override
-    public String getColumnName() {
-      return columnName;
-    }
-
-    @Override
-    public Object getLiteral() {
-      // To get around a kryo 2.22 bug while deserializing a Timestamp into a Date
-      // (https://github.com/EsotericSoftware/kryo/issues/88)
-      // When we see a Date, convert back into Timestamp
-      if (literal instanceof java.util.Date) {
-        return new Timestamp(((java.util.Date)literal).getTime());
-      }
-      return literal;
-    }
-
-    @Override
-    public List<Object> getLiteralList() {
-      return literalList;
-    }
-
-    @Override
-    public String toString() {
-      StringBuilder buffer = new StringBuilder();
-      buffer.append('(');
-      buffer.append(operator);
-      buffer.append(' ');
-      buffer.append(columnName);
-      if (literal != null) {
-        buffer.append(' ');
-        buffer.append(literal);
-      } else if (literalList != null) {
-        for(Object lit: literalList) {
-          buffer.append(' ');
-          buffer.append(lit == null ? "null" : lit.toString());
-        }
-      }
-      buffer.append(')');
-      return buffer.toString();
-    }
-
-    private static boolean isEqual(Object left, Object right) {
-      return left == right ||
-          (left != null && right != null && left.equals(right));
-    }
-
-    @Override
-    public boolean equals(Object other) {
-      if (other == null || other.getClass() != getClass()) {
-        return false;
-      } else if (other == this) {
-        return true;
-      } else {
-        PredicateLeafImpl o = (PredicateLeafImpl) other;
-        return operator == o.operator &&
-            type == o.type &&
-            columnName.equals(o.columnName) &&
-            isEqual(literal, o.literal) &&
-            isEqual(literalList, o.literalList);
-      }
-    }
-
-    @Override
-    public int hashCode() {
-      return operator.hashCode() +
-             type.hashCode() * 17 +
-             columnName.hashCode() * 3 * 17 +
-             (literal == null ? 0 : literal.hashCode()) * 101 * 3 * 17 +
-             (literalList == null ? 0 : literalList.hashCode()) *
-                 103 * 101 * 3 * 17;
-    }
-
-    public static void setColumnName(PredicateLeaf leaf, String newName) {
-      assert leaf instanceof PredicateLeafImpl;
-      ((PredicateLeafImpl)leaf).columnName = newName;
-    }
-  }
-
-  private final List<PredicateLeaf> leaves;
-  private final ExpressionTree expression;
-
-  SearchArgumentImpl(ExpressionTree expression, List<PredicateLeaf> leaves) {
-    this.expression = expression;
-    this.leaves = leaves;
-  }
-
-  // Used by kryo
-  @SuppressWarnings("unused")
-  SearchArgumentImpl() {
-    leaves = null;
-    expression = null;
-  }
-
-  @Override
-  public List<PredicateLeaf> getLeaves() {
-    return leaves;
-  }
-
-  @Override
-  public TruthValue evaluate(TruthValue[] leaves) {
-    return expression == null ? TruthValue.YES : expression.evaluate(leaves);
-  }
-
-  @Override
-  public ExpressionTree getExpression() {
-    return expression;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder buffer = new StringBuilder();
-    for(int i=0; i < leaves.size(); ++i) {
-      buffer.append("leaf-");
-      buffer.append(i);
-      buffer.append(" = ");
-      buffer.append(leaves.get(i).toString());
-      buffer.append(", ");
-    }
-    buffer.append("expr = ");
-    buffer.append(expression);
-    return buffer.toString();
-  }
-
-  static class BuilderImpl implements Builder {
-
-    // Max number of leaf combinations allowed during CNF conversion; an
-    // expression that would expand beyond this is converted to a MAYBE.
-    private static final int CNF_COMBINATIONS_THRESHOLD = 256;
-
-    private final Deque<ExpressionTree> currentTree =
-        new ArrayDeque<ExpressionTree>();
-    private final Map<PredicateLeaf, Integer> leaves =
-        new HashMap<PredicateLeaf, Integer>();
-    private final ExpressionTree root =
-        new ExpressionTree(ExpressionTree.Operator.AND);
-    {
-      currentTree.add(root);
-    }
-
-    @Override
-    public Builder startOr() {
-      ExpressionTree node = new ExpressionTree(ExpressionTree.Operator.OR);
-      currentTree.getFirst().getChildren().add(node);
-      currentTree.addFirst(node);
-      return this;
-    }
-
-    @Override
-    public Builder startAnd() {
-      ExpressionTree node = new ExpressionTree(ExpressionTree.Operator.AND);
-      currentTree.getFirst().getChildren().add(node);
-      currentTree.addFirst(node);
-      return this;
-    }
-
-    @Override
-    public Builder startNot() {
-      ExpressionTree node = new ExpressionTree(ExpressionTree.Operator.NOT);
-      currentTree.getFirst().getChildren().add(node);
-      currentTree.addFirst(node);
-      return this;
-    }
-
-    @Override
-    public Builder end() {
-      ExpressionTree current = currentTree.removeFirst();
-      if (current.getChildren().size() == 0) {
-        throw new IllegalArgumentException("Can't create expression " +
-            current + " with no children.");
-      }
-      if (current.getOperator() == ExpressionTree.Operator.NOT &&
-          current.getChildren().size() != 1) {
-        throw new IllegalArgumentException("Can't create not expression " +
-            current + " with more than 1 child.");
-      }
-      return this;
-    }
-
-    private int addLeaf(PredicateLeaf leaf) {
-      Integer result = leaves.get(leaf);
-      if (result == null) {
-        int id = leaves.size();
-        leaves.put(leaf, id);
-        return id;
-      } else {
-        return result;
-      }
-    }
-
-    @Override
-    public Builder lessThan(String column, PredicateLeaf.Type type,
-                            Object literal) {
-      ExpressionTree parent = currentTree.getFirst();
-      if (column == null || literal == null) {
-        parent.getChildren().add(new ExpressionTree(TruthValue.YES_NO_NULL));
-      } else {
-        PredicateLeaf leaf =
-            new PredicateLeafImpl(PredicateLeaf.Operator.LESS_THAN,
-                type, column, literal, null);
-        parent.getChildren().add(new ExpressionTree(addLeaf(leaf)));
-      }
-      return this;
-    }
-
-    @Override
-    public Builder lessThanEquals(String column, PredicateLeaf.Type type,
-                                  Object literal) {
-      ExpressionTree parent = currentTree.getFirst();
-      if (column == null || literal == null) {
-        parent.getChildren().add(new ExpressionTree(TruthValue.YES_NO_NULL));
-      } else {
-        PredicateLeaf leaf =
-            new PredicateLeafImpl(PredicateLeaf.Operator.LESS_THAN_EQUALS,
-                type, column, literal, null);
-        parent.getChildren().add(new ExpressionTree(addLeaf(leaf)));
-      }
-      return this;
-    }
-
-    @Override
-    public Builder equals(String column, PredicateLeaf.Type type,
-                          Object literal) {
-      ExpressionTree parent = currentTree.getFirst();
-      if (column == null || literal == null) {
-        parent.getChildren().add(new ExpressionTree(TruthValue.YES_NO_NULL));
-      } else {
-        PredicateLeaf leaf =
-            new PredicateLeafImpl(PredicateLeaf.Operator.EQUALS,
-                type, column, literal, null);
-        parent.getChildren().add(new ExpressionTree(addLeaf(leaf)));
-      }
-      return this;
-    }
-
-    @Override
-    public Builder nullSafeEquals(String column, PredicateLeaf.Type type,
-                                  Object literal) {
-      ExpressionTree parent = currentTree.getFirst();
-      if (column == null || literal == null) {
-        parent.getChildren().add(new ExpressionTree(TruthValue.YES_NO_NULL));
-      } else {
-        PredicateLeaf leaf =
-            new PredicateLeafImpl(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
-                type, column, literal, null);
-        parent.getChildren().add(new ExpressionTree(addLeaf(leaf)));
-      }
-      return this;
-    }
-
-    @Override
-    public Builder in(String column, PredicateLeaf.Type type,
-                      Object... literal) {
-      ExpressionTree parent = currentTree.getFirst();
-      if (column  == null || literal == null) {
-        parent.getChildren().add(new ExpressionTree(TruthValue.YES_NO_NULL));
-      } else {
-        if (literal.length == 0) {
-          throw new IllegalArgumentException("Can't create in expression with "
-              + "no arguments");
-        }
-        List<Object> argList = new ArrayList<Object>();
-        argList.addAll(Arrays.asList(literal));
-
-        PredicateLeaf leaf =
-            new PredicateLeafImpl(PredicateLeaf.Operator.IN,
-                type, column, null, argList);
-        parent.getChildren().add(new ExpressionTree(addLeaf(leaf)));
-      }
-      return this;
-    }
-
-    @Override
-    public Builder isNull(String column, PredicateLeaf.Type type) {
-      ExpressionTree parent = currentTree.getFirst();
-      if (column == null) {
-        parent.getChildren().add(new ExpressionTree(TruthValue.YES_NO_NULL));
-      } else {
-        PredicateLeaf leaf =
-            new PredicateLeafImpl(PredicateLeaf.Operator.IS_NULL,
-                type, column, null, null);
-        parent.getChildren().add(new ExpressionTree(addLeaf(leaf)));
-      }
-      return this;
-    }
-
-    @Override
-    public Builder between(String column, PredicateLeaf.Type type, Object lower,
-                           Object upper) {
-      ExpressionTree parent = currentTree.getFirst();
-      if (column == null || lower == null || upper == null) {
-        parent.getChildren().add(new ExpressionTree(TruthValue.YES_NO_NULL));
-      } else {
-        List<Object> argList = new ArrayList<Object>();
-        argList.add(lower);
-        argList.add(upper);
-        PredicateLeaf leaf =
-            new PredicateLeafImpl(PredicateLeaf.Operator.BETWEEN,
-                type, column, null, argList);
-        parent.getChildren().add(new ExpressionTree(addLeaf(leaf)));
-      }
-      return this;
-    }
-
-    @Override
-    public Builder literal(TruthValue truth) {
-      ExpressionTree parent = currentTree.getFirst();
-      parent.getChildren().add(new ExpressionTree(truth));
-      return this;
-    }
-
-    /**
-     * Recursively explore the tree to find the leaves that are still reachable
-     * after optimizations.
-     * @param tree the node to check next
-     * @param next the next available leaf id
-     * @param leafReorder the mapping from old leaf ids to new ones (-1 if unassigned)
-     * @return the next available leaf id
-     */
-    static int compactLeaves(ExpressionTree tree, int next, int[] leafReorder) {
-      if (tree.getOperator() == ExpressionTree.Operator.LEAF) {
-        int oldLeaf = tree.getLeaf();
-        if (leafReorder[oldLeaf] == -1) {
-          leafReorder[oldLeaf] = next++;
-        }
-      } else if (tree.getChildren() != null){
-        for(ExpressionTree child: tree.getChildren()) {
-          next = compactLeaves(child, next, leafReorder);
-        }
-      }
-      return next;
-    }
-
-    /**
-     * Rewrite expression tree to update the leaves.
-     * @param root the root of the tree to fix
-     * @param leafReorder a map from old leaf ids to new leaf ids
-     * @return the fixed root
-     */
-    static ExpressionTree rewriteLeaves(ExpressionTree root,
-                              int[] leafReorder) {
-      // The leaves could be shared in the tree. Use Set to remove the duplicates.
-      Set<ExpressionTree> leaves = new HashSet<ExpressionTree>();
-      Queue<ExpressionTree> nodes = new LinkedList<ExpressionTree>();
-      nodes.add(root);
-
-      while(!nodes.isEmpty()) {
-        ExpressionTree node = nodes.remove();
-        if (node.getOperator() == ExpressionTree.Operator.LEAF) {
-          leaves.add(node);
-        } else {
-          if (node.getChildren() != null){
-            nodes.addAll(node.getChildren());
-          }
-        }
-      }
-
-      // Update the leaf in place
-      for(ExpressionTree leaf : leaves) {
-        leaf.setLeaf(leafReorder[leaf.getLeaf()]);
-      }
-
-      return root;
-    }
-
-    @Override
-    public SearchArgument build() {
-      if (currentTree.size() != 1) {
-        throw new IllegalArgumentException("Failed to end " +
-            currentTree.size() + " operations.");
-      }
-      ExpressionTree optimized = pushDownNot(root);
-      optimized = foldMaybe(optimized);
-      optimized = flatten(optimized);
-      optimized = convertToCNF(optimized);
-      optimized = flatten(optimized);
-      int[] leafReorder = new int[leaves.size()];
-      Arrays.fill(leafReorder, -1);
-      int newLeafCount = compactLeaves(optimized, 0, leafReorder);
-      optimized = rewriteLeaves(optimized, leafReorder);
-      ArrayList<PredicateLeaf> leafList = new ArrayList<>(newLeafCount);
-      // expand list to correct size
-      for(int i=0; i < newLeafCount; ++i) {
-        leafList.add(null);
-      }
-      // build the new list
-      for(Map.Entry<PredicateLeaf, Integer> elem: leaves.entrySet()) {
-        int newLoc = leafReorder[elem.getValue()];
-        if (newLoc != -1) {
-          leafList.set(newLoc, elem.getKey());
-        }
-      }
-      return new SearchArgumentImpl(optimized, leafList);
-    }
-
-    /**
-     * Push the negations all the way to just before the leaves. Also remove
-     * double negatives.
-     * @param root the expression to normalize
-     * @return the normalized expression, which may share some or all of the
-     * nodes of the original expression.
-     */
-    static ExpressionTree pushDownNot(ExpressionTree root) {
-      if (root.getOperator() == ExpressionTree.Operator.NOT) {
-        ExpressionTree child = root.getChildren().get(0);
-        switch (child.getOperator()) {
-          case NOT:
-            return pushDownNot(child.getChildren().get(0));
-          case CONSTANT:
-            return new ExpressionTree(child.getConstant().not());
-          case AND:
-            root = new ExpressionTree(ExpressionTree.Operator.OR);
-            for(ExpressionTree kid: child.getChildren()) {
-              root.getChildren().add(pushDownNot(new
-                  ExpressionTree(ExpressionTree.Operator.NOT, kid)));
-            }
-            break;
-          case OR:
-            root = new ExpressionTree(ExpressionTree.Operator.AND);
-            for(ExpressionTree kid: child.getChildren()) {
-              root.getChildren().add(pushDownNot(new ExpressionTree
-                  (ExpressionTree.Operator.NOT, kid)));
-            }
-            break;
-          // for leaf, we don't do anything
-          default:
-            break;
-        }
-      } else if (root.getChildren() != null) {
-        // iterate through children and push down not for each one
-        for(int i=0; i < root.getChildren().size(); ++i) {
-          root.getChildren().set(i, pushDownNot(root.getChildren().get(i)));
-        }
-      }
-      return root;
-    }
-
-    /**
-     * Remove MAYBE values from the expression. If they are in an AND operator,
-     * they are dropped. If they are in an OR operator, they kill their parent.
-     * This assumes that pushDownNot has already been called.
-     * @param expr The expression to clean up
-     * @return The cleaned up expression
-     */
-    static ExpressionTree foldMaybe(ExpressionTree expr) {
-      if (expr.getChildren() != null) {
-        for(int i=0; i < expr.getChildren().size(); ++i) {
-          ExpressionTree child = foldMaybe(expr.getChildren().get(i));
-          if (child.getConstant() == TruthValue.YES_NO_NULL) {
-            switch (expr.getOperator()) {
-              case AND:
-                expr.getChildren().remove(i);
-                i -= 1;
-                break;
-              case OR:
-                // a maybe will kill the or condition
-                return child;
-              default:
-                throw new IllegalStateException("Got a maybe as child of " +
-                    expr);
-            }
-          } else {
-            expr.getChildren().set(i, child);
-          }
-        }
-        if (expr.getChildren().isEmpty()) {
-          return new ExpressionTree(TruthValue.YES_NO_NULL);
-        }
-      }
-      return expr;
-    }
-
-    /**
-     * Converts multi-level ands and ors into single level ones.
-     * @param root the expression to flatten
-     * @return the flattened expression, which will always be root with
-     *   potentially modified children.
-     */
-    static ExpressionTree flatten(ExpressionTree root) {
-      if (root.getChildren() != null) {
-        // iterate through the index, so that if we add more children,
-        // they don't get re-visited
-        for(int i=0; i < root.getChildren().size(); ++i) {
-          ExpressionTree child = flatten(root.getChildren().get(i));
-          // do we need to flatten?
-          if (child.getOperator() == root.getOperator() &&
-              child.getOperator() != ExpressionTree.Operator.NOT) {
-            boolean first = true;
-            for(ExpressionTree grandkid: child.getChildren()) {
-              // for the first grandkid replace the original parent
-              if (first) {
-                first = false;
-                root.getChildren().set(i, grandkid);
-              } else {
-                root.getChildren().add(++i, grandkid);
-              }
-            }
-          } else {
-            root.getChildren().set(i, child);
-          }
-        }
-        // if we have a singleton AND or OR, just return the child
-        if ((root.getOperator() == ExpressionTree.Operator.OR ||
-            root.getOperator() == ExpressionTree.Operator.AND) &&
-            root.getChildren().size() == 1) {
-          return root.getChildren().get(0);
-        }
-      }
-      return root;
-    }
-
-    /**
-     * Generate all combinations of items on the andList. For each item on the
-     * andList, it generates all combinations of one child from each and
-     * expression. Thus, (and a b) (and c d) will be expanded to: (or a c)
-     * (or a d) (or b c) (or b d). If there are items on the nonAndList, they
-     * are added to each or expression.
-     * @param result a list to put the results onto
-     * @param andList a list of and expressions
-     * @param nonAndList a list of non-and expressions
-     */
-    private static void generateAllCombinations(List<ExpressionTree> result,
-                                                List<ExpressionTree> andList,
-                                                List<ExpressionTree> nonAndList
-    ) {
-      List<ExpressionTree> kids = andList.get(0).getChildren();
-      if (result.isEmpty()) {
-        for(ExpressionTree kid: kids) {
-          ExpressionTree or = new ExpressionTree(ExpressionTree.Operator.OR);
-          result.add(or);
-          for(ExpressionTree node: nonAndList) {
-            or.getChildren().add(new ExpressionTree(node));
-          }
-          or.getChildren().add(kid);
-        }
-      } else {
-        List<ExpressionTree> work = new ArrayList<ExpressionTree>(result);
-        result.clear();
-        for(ExpressionTree kid: kids) {
-          for(ExpressionTree or: work) {
-            ExpressionTree copy = new ExpressionTree(or);
-            copy.getChildren().add(kid);
-            result.add(copy);
-          }
-        }
-      }
-      if (andList.size() > 1) {
-        generateAllCombinations(result, andList.subList(1, andList.size()),
-            nonAndList);
-      }
-    }
-
-    /**
-     * Convert an expression so that the top level operator is AND with OR
-     * operators under it. This routine assumes that all of the NOT operators
-     * have been pushed to the leaves via pushDownNot.
-     * @param root the expression
-     * @return the normalized expression
-     */
-    static ExpressionTree convertToCNF(ExpressionTree root) {
-      if (root.getChildren() != null) {
-        // convert all of the children to CNF
-        int size = root.getChildren().size();
-        for(int i=0; i < size; ++i) {
-          root.getChildren().set(i, convertToCNF(root.getChildren().get(i)));
-        }
-        if (root.getOperator() == ExpressionTree.Operator.OR) {
-          // a list of leaves that weren't under AND expressions
-          List<ExpressionTree> nonAndList = new ArrayList<ExpressionTree>();
-          // a list of AND expressions that we need to distribute
-          List<ExpressionTree> andList = new ArrayList<ExpressionTree>();
-          for(ExpressionTree child: root.getChildren()) {
-            if (child.getOperator() == ExpressionTree.Operator.AND) {
-              andList.add(child);
-            } else if (child.getOperator() == ExpressionTree.Operator.OR) {
-              // pull apart the kids of the OR expression
-              for(ExpressionTree grandkid: child.getChildren()) {
-                nonAndList.add(grandkid);
-              }
-            } else {
-              nonAndList.add(child);
-            }
-          }
-          if (!andList.isEmpty()) {
-            if (checkCombinationsThreshold(andList)) {
-              root = new ExpressionTree(ExpressionTree.Operator.AND);
-              generateAllCombinations(root.getChildren(), andList, nonAndList);
-            } else {
-              root = new ExpressionTree(TruthValue.YES_NO_NULL);
-            }
-          }
-        }
-      }
-      return root;
-    }
-
-    private static boolean checkCombinationsThreshold(List<ExpressionTree> andList) {
-      int numComb = 1;
-      for (ExpressionTree tree : andList) {
-        numComb *= tree.getChildren().size();
-        if (numComb > CNF_COMBINATIONS_THRESHOLD) {
-          return false;
-        }
-      }
-      return true;
-    }
-
-  }
-}
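
To see the normalization pipeline above (pushDownNot, foldMaybe, flatten, convertToCNF, leaf compaction) end to end, a hedged sketch:

    // NOT(x < 10 OR y = 5): De Morgan pushes the NOT down to the leaves,
    // leaving a flat AND of negated leaves.
    SearchArgument sarg = SearchArgumentFactory.newBuilder()
        .startNot()
          .startOr()
            .lessThan("x", PredicateLeaf.Type.LONG, 10L)
            .equals("y", PredicateLeaf.Type.LONG, 5L)
          .end()
        .end()
        .build();
    // toString renders roughly:
    //   leaf-0 = (LESS_THAN x 10), leaf-1 = (EQUALS y 5),
    //   expr = (and (not leaf-0) (not leaf-1))
    System.out.println(sarg);
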
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/util/JavaDataModel.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/util/JavaDataModel.java
deleted file mode 100644
index 4a745e4..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/util/JavaDataModel.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.util;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Estimation of the memory footprint of Java objects.
- */
-public enum JavaDataModel {
-
-  JAVA32 {
-    @Override
-    public int object() {
-      return JAVA32_OBJECT;
-    }
-
-    @Override
-    public int array() {
-      return JAVA32_ARRAY;
-    }
-
-    @Override
-    public int ref() {
-      return JAVA32_REF;
-    }
-
-    @Override
-    public int hashMap(int entry) {
-      // base  = JAVA32_OBJECT + PRIMITIVES1 * 4 + JAVA32_FIELDREF * 3 + JAVA32_ARRAY;
-      // entry = JAVA32_OBJECT + JAVA32_FIELDREF + PRIMITIVES1
-      return hashMapBase() + hashMapEntry() * entry;
-    }
-
-    @Override
-    public int hashMapBase() {
-      return 64;
-    }
-
-    @Override
-    public int hashMapEntry() {
-      return 24;
-    }
-
-    @Override
-    public int hashSet(int entry) {
-      // hashMap += JAVA32_OBJECT
-      return hashSetBase() + hashSetEntry() * entry;
-    }
-
-    @Override
-    public int hashSetBase() {
-      return 80;
-    }
-
-    @Override
-    public int hashSetEntry() {
-      return 24;
-    }
-
-    @Override
-    public int linkedHashMap(int entry) {
-      // hashMap += JAVA32_FIELDREF + PRIMITIVES1
-      // hashMap.entry += JAVA32_FIELDREF * 2
-      return 72 + 32 * entry;
-    }
-
-    @Override
-    public int linkedList(int entry) {
-      // base  = JAVA32_OBJECT + PRIMITIVES1 * 2 + JAVA32_FIELDREF;
-      // entry = JAVA32_OBJECT + JAVA32_FIELDREF * 2
-      return linkedListBase() + linkedListEntry() * entry;
-    }
-
-    @Override
-    public int linkedListBase() {
-      return 28;
-    }
-
-    @Override
-    public int linkedListEntry() {
-      return 24;
-    }
-
-    @Override
-    public int arrayList() {
-      // JAVA32_OBJECT + PRIMITIVES1 * 2 + JAVA32_ARRAY;
-      return 44;
-    }
-
-    @Override
-    public int memoryAlign() {
-      return 8;
-    }
-  }, JAVA64 {
-    @Override
-    public int object() {
-      return JAVA64_OBJECT;
-    }
-
-    @Override
-    public int array() {
-      return JAVA64_ARRAY;
-    }
-
-    @Override
-    public int ref() {
-      return JAVA64_REF;
-    }
-
-    @Override
-    public int hashMap(int entry) {
-      // base  = JAVA64_OBJECT + PRIMITIVES1 * 4 + JAVA64_FIELDREF * 3 + JAVA64_ARRAY;
-      // entry = JAVA64_OBJECT + JAVA64_FIELDREF + PRIMITIVES1
-      return hashMapBase() + hashMapEntry() * entry;
-    }
-
-    @Override
-    public int hashMapBase() {
-      return 112;
-    }
-
-    @Override
-    public int hashMapEntry() {
-      return 44;
-    }
-
-    @Override
-    public int hashSet(int entry) {
-      // hashMap += JAVA64_OBJECT
-      return hashSetBase() + hashSetEntry() * entry;
-    }
-
-    @Override
-    public int hashSetBase() {
-      return 144;
-    }
-
-    @Override
-    public int hashSetEntry() {
-      return 44;
-    }
-
-    @Override
-    public int linkedHashMap(int entry) {
-      // hashMap += JAVA64_FIELDREF + PRIMITIVES1
-      // hashMap.entry += JAVA64_FIELDREF * 2
-      return 128 + 60 * entry;
-    }
-
-    @Override
-    public int linkedList(int entry) {
-      // base  = JAVA64_OBJECT + PRIMITIVES1 * 2 + JAVA64_FIELDREF;
-      // entry = JAVA64_OBJECT + JAVA64_FIELDREF * 2
-      return linkedListBase() + linkedListEntry() * entry;
-    }
-
-    @Override
-    public int linkedListBase() {
-      return 48;
-    }
-
-    @Override
-    public int linkedListEntry() {
-      return 48;
-    }
-
-    @Override
-    public int arrayList() {
-      // JAVA64_OBJECT + PRIMITIVES1 * 2 + JAVA64_ARRAY;
-      return 80;
-    }
-
-    @Override
-    public int memoryAlign() {
-      return 8;
-    }
-  };
-
-  public abstract int object();
-  public abstract int array();
-  public abstract int ref();
-  public abstract int hashMap(int entry);
-  public abstract int hashMapBase();
-  public abstract int hashMapEntry();
-  public abstract int hashSetBase();
-  public abstract int hashSetEntry();
-  public abstract int hashSet(int entry);
-  public abstract int linkedHashMap(int entry);
-  public abstract int linkedListBase();
-  public abstract int linkedListEntry();
-  public abstract int linkedList(int entry);
-  public abstract int arrayList();
-  public abstract int memoryAlign();
-
-  // ascii string
-  public int lengthFor(String string) {
-    return lengthForStringOfLength(string.length());
-  }
-
-  public int lengthForRandom() {
-    // boolean + double + AtomicLong
-    return object() + primitive1() + primitive2() + object() + primitive2();
-  }
-
-  public int primitive1() {
-    return PRIMITIVES1;
-  }
-  public int primitive2() {
-    return PRIMITIVES2;
-  }
-
-  public static int alignUp(int value, int align) {
-    return (value + align - 1) & ~(align - 1);
-  }
-
-  private static final Logger LOG = LoggerFactory.getLogger(JavaDataModel.class);
-
-  public static final int JAVA32_META = 12;
-  public static final int JAVA32_ARRAY_META = 16;
-  public static final int JAVA32_REF = 4;
-  public static final int JAVA32_OBJECT = 16;   // JAVA32_META + JAVA32_REF
-  public static final int JAVA32_ARRAY = 20;    // JAVA32_ARRAY_META + JAVA32_REF
-
-  public static final int JAVA64_META = 24;
-  public static final int JAVA64_ARRAY_META = 32;
-  public static final int JAVA64_REF = 8;
-  public static final int JAVA64_OBJECT = 32;   // JAVA64_META + JAVA64_REF
-  public static final int JAVA64_ARRAY = 40;    // JAVA64_ARRAY_META + JAVA64_REF
-
-  public static final int PRIMITIVES1 = 4;      // void, boolean, byte, short, int, float
-  public static final int PRIMITIVES2 = 8;      // long, double
-
-  public static final int PRIMITIVE_BYTE = 1;    // byte
-
-  private static final class LazyHolder {
-    private static final JavaDataModel MODEL_FOR_SYSTEM = getModelForSystem();
-  }
-
-  //@VisibleForTesting
-  static JavaDataModel getModelForSystem() {
-    String props = null;
-    try {
-      props = System.getProperty("sun.arch.data.model");
-    } catch (Exception e) {
-      LOG.warn("Failed to determine java data model, defaulting to 64", e);
-    }
-    if ("32".equals(props)) {
-      return JAVA32;
-    }
-    // TODO: separate model is needed for compressedOops, which can be guessed from memory size.
-    return JAVA64;
-  }
-
-  public static JavaDataModel get() {
-    return LazyHolder.MODEL_FOR_SYSTEM;
-  }
-
-  public static int round(int size) {
-    JavaDataModel model = get();
-    if (model == JAVA32 || size % 8 == 0) {
-      return size;
-    }
-    return ((size + 8) >> 3) << 3;
-  }
-
-  private int lengthForPrimitiveArrayOfSize(int primitiveSize, int length) {
-    return alignUp(array() + primitiveSize*length, memoryAlign());
-  }
-
-  public int lengthForByteArrayOfSize(int length) {
-    return lengthForPrimitiveArrayOfSize(PRIMITIVE_BYTE, length);
-  }
-  public int lengthForObjectArrayOfSize(int length) {
-    return lengthForPrimitiveArrayOfSize(ref(), length);
-  }
-  public int lengthForLongArrayOfSize(int length) {
-    return lengthForPrimitiveArrayOfSize(primitive2(), length);
-  }
-  public int lengthForDoubleArrayOfSize(int length) {
-    return lengthForPrimitiveArrayOfSize(primitive2(), length);
-  }
-  public int lengthForIntArrayOfSize(int length) {
-    return lengthForPrimitiveArrayOfSize(primitive1(), length);
-  }
-  public int lengthForBooleanArrayOfSize(int length) {
-    return lengthForPrimitiveArrayOfSize(PRIMITIVE_BYTE, length);
-  }
-  public int lengthForTimestampArrayOfSize(int length) {
-    return lengthForPrimitiveArrayOfSize(lengthOfTimestamp(), length);
-  }
-  public int lengthForDateArrayOfSize(int length) {
-    return lengthForPrimitiveArrayOfSize(lengthOfDate(), length);
-  }
-  public int lengthForDecimalArrayOfSize(int length) {
-    return lengthForPrimitiveArrayOfSize(lengthOfDecimal(), length);
-  }
-
-  public int lengthOfDecimal() {
-    // object overhead + 8 bytes for intCompact + 4 bytes for precision
-    // + 4 bytes for scale + size of BigInteger
-    return object() + 2 * primitive2() + lengthOfBigInteger();
-  }
-
-  private int lengthOfBigInteger() {
-    // object overhead + 4 bytes for bitCount + 4 bytes for bitLength
-    // + 4 bytes for firstNonzeroByteNum + 4 bytes for firstNonzeroIntNum +
-    // + 4 bytes for lowestSetBit + 5 bytes for size of magnitude (since max precision
-    // is only 38 for HiveDecimal) + 7 bytes of padding (since java memory allocations
-    // are 8 byte aligned)
-    return object() + 4 * primitive2();
-  }
-
-  public int lengthOfTimestamp() {
-    // object overhead + 4 bytes for int (nanos) + 4 bytes of padding
-    return object() + primitive2();
-  }
-
-  public int lengthOfDate() {
-    // object overhead + 8 bytes for long (fastTime) + 16 bytes for cdate
-    return object() + 3 * primitive2();
-  }
-
-  public int lengthForStringOfLength(int strLen) {
-    return object() + primitive1() * 3 + array() + strLen;
-  }
-}
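
A short sizing sketch against the model above (concrete numbers depend on whether JAVA32 or JAVA64 is detected at runtime):

    JavaDataModel model = JavaDataModel.get();
    int mapBytes = model.hashMap(100);            // base + 100 entries
    int strBytes = model.lengthFor("hello");      // header + fields + payload
    int aligned = JavaDataModel.alignUp(13, 8);   // rounds 13 up to 16
    System.out.println(mapBytes + " " + strBytes + " " + aligned);
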
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java b/java/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java
deleted file mode 100644
index 41db9ca..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.util;
-
-import org.apache.hadoop.hive.common.type.HiveDecimal;
-
-import java.math.BigDecimal;
-import java.sql.Timestamp;
-
-/**
- * Utilities for Timestamps and the relevant conversions.
- */
-public class TimestampUtils {
-  public static final BigDecimal BILLION_BIG_DECIMAL = BigDecimal.valueOf(1000000000);
-
-  /**
-   * Convert the timestamp to a double measured in seconds.
-   * @return double representation of the timestamp, accurate to nanoseconds
-   */
-  public static double getDouble(Timestamp ts) {
-    long seconds = millisToSeconds(ts.getTime());
-    return seconds + ((double) ts.getNanos()) / 1000000000;
-  }
-
-  public static Timestamp doubleToTimestamp(double f) {
-    try {
-      long seconds = (long) f;
-
-      // We must ensure the exactness of the double's fractional portion.
-      // 0.6 as the fraction part will be converted to 0.59999... and
-      // significantly reduce the savings from binary serialization
-      BigDecimal bd = new BigDecimal(String.valueOf(f));
-
-      bd = bd.subtract(new BigDecimal(seconds)).multiply(new BigDecimal(1000000000));
-      int nanos = bd.intValue();
-
-      // Convert to millis
-      long millis = seconds * 1000;
-      if (nanos < 0) {
-        millis -= 1000;
-        nanos += 1000000000;
-      }
-      Timestamp t = new Timestamp(millis);
-
-      // Set remaining fractional portion to nanos
-      t.setNanos(nanos);
-      return t;
-    } catch (NumberFormatException nfe) {
-      return null;
-    } catch (IllegalArgumentException iae) {
-      return null;
-    }
-  }
-
-  public static Timestamp decimalToTimestamp(HiveDecimal d) {
-    try {
-      BigDecimal nanoInstant = d.bigDecimalValue().multiply(BILLION_BIG_DECIMAL);
-      int nanos = nanoInstant.remainder(BILLION_BIG_DECIMAL).intValue();
-      if (nanos < 0) {
-        nanos += 1000000000;
-      }
-      long seconds =
-          nanoInstant.subtract(new BigDecimal(nanos)).divide(BILLION_BIG_DECIMAL).longValue();
-      Timestamp t = new Timestamp(seconds * 1000);
-      t.setNanos(nanos);
-
-      return t;
-    } catch (NumberFormatException nfe) {
-      return null;
-    } catch (IllegalArgumentException iae) {
-      return null;
-    }
-  }
-
-  /**
-   * Rounds the number of milliseconds relative to the epoch down to the nearest whole number of
-   * seconds. 500 would round to 0, -500 would round to -1.
-   */
-  public static long millisToSeconds(long millis) {
-    if (millis >= 0) {
-      return millis / 1000;
-    } else {
-      return (millis - 999) / 1000;
-    }
-  }
-}
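
A round-trip sketch of the conversions above:

    // 1.5 seconds past the epoch: 1 second plus 500,000,000 nanos.
    Timestamp ts = TimestampUtils.doubleToTimestamp(1.5);
    System.out.println(ts.getTime());                  // 1500 (millis)
    System.out.println(TimestampUtils.getDouble(ts));  // 1.5

    // millisToSeconds floors toward negative infinity:
    System.out.println(TimestampUtils.millisToSeconds(500));   // 0
    System.out.println(TimestampUtils.millisToSeconds(-500));  // -1
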
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java b/java/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
deleted file mode 100644
index 637720a..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.serde2.io;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.sql.Date;
-import java.util.Calendar;
-import java.util.GregorianCalendar;
-import java.util.TimeZone;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.WritableUtils;
-
-
-/**
- * DateWritable
- * Writable equivalent of java.sql.Date.
- *
- * Dates are of the format
- *    YYYY-MM-DD
- *
- */
-public class DateWritable implements WritableComparable<DateWritable> {
-
-  private static final long MILLIS_PER_DAY = TimeUnit.DAYS.toMillis(1);
-
-  // Local time zone. Store separately because Calendar would clone it.
-  // Java TimeZone has no mention of thread safety. Use thread local instance to be safe.
-  private static final ThreadLocal<TimeZone> LOCAL_TIMEZONE = new ThreadLocal<TimeZone>() {
-    @Override
-    protected TimeZone initialValue() {
-      return Calendar.getInstance().getTimeZone();
-    }
-  };
-
-  private static final ThreadLocal<Calendar> UTC_CALENDAR = new ThreadLocal<Calendar>() {
-    @Override
-    protected Calendar initialValue() {
-      return new GregorianCalendar(TimeZone.getTimeZone("UTC"));
-    }
-  };
-  private static final ThreadLocal<Calendar> LOCAL_CALENDAR = new ThreadLocal<Calendar>() {
-    @Override
-    protected Calendar initialValue() {
-      return Calendar.getInstance();
-    }
-  };
-
-  // Internal representation is an integer representing day offset from our epoch value 1970-01-01
-  private int daysSinceEpoch = 0;
-
-  /* Constructors */
-  public DateWritable() {
-  }
-
-  public DateWritable(DateWritable d) {
-    set(d);
-  }
-
-  public DateWritable(Date d) {
-    set(d);
-  }
-
-  public DateWritable(int d) {
-    set(d);
-  }
-
-  /**
-   * Set the DateWritable based on the days since epoch date.
-   * @param d integer value representing days since epoch date
-   */
-  public void set(int d) {
-    daysSinceEpoch = d;
-  }
-
-  /**
-   * Set the DateWritable based on the year/month/day of the date in the local timezone.
-   * @param d Date value
-   */
-  public void set(Date d) {
-    if (d == null) {
-      daysSinceEpoch = 0;
-      return;
-    }
-
-    set(dateToDays(d));
-  }
-
-  public void set(DateWritable d) {
-    set(d.daysSinceEpoch);
-  }
-
-  /**
-   * @return Date value corresponding to the date in the local time zone
-   */
-  public Date get() {
-    return get(true);
-  }
-
-  // TODO: we should call this more often. In theory, for DATE type, time should never matter, but
-  //       it's hard to tell w/some code paths like UDFs/OIs etc. that are used in many places.
-  public Date get(boolean doesTimeMatter) {
-    return new Date(daysToMillis(daysSinceEpoch, doesTimeMatter));
-  }
-
-  public int getDays() {
-    return daysSinceEpoch;
-  }
-
-  /**
-   * @return time in seconds corresponding to this DateWritable
-   */
-  public long getTimeInSeconds() {
-    return get().getTime() / 1000;
-  }
-
-  public static Date timeToDate(long l) {
-    return new Date(l * 1000);
-  }
-
-  public static long daysToMillis(int d) {
-    return daysToMillis(d, true);
-  }
-
-  public static long daysToMillis(int d, boolean doesTimeMatter) {
-    // What we are trying to get is the equivalent of new Date(ymd).getTime() in the local tz,
-    // where ymd is whatever d represents. How it "works" is this.
-    // First we get the UTC midnight for that day (which always exists, a small island of sanity).
-    long utcMidnight = d * MILLIS_PER_DAY;
-    // Now we take a local TZ offset at midnight UTC. Say we are in -4; that means (surprise
-    // surprise) that at midnight UTC it was 20:00 in local. So far we are on firm ground.
-    long utcMidnightOffset = LOCAL_TIMEZONE.get().getOffset(utcMidnight);
-    // And now we wander straight into the swamp, when instead of adding, we subtract it from UTC
-    // midnight to supposedly get local midnight (in the above case, 4:00 UTC). Of course, given
-    // all the insane DST variations, where we actually end up is anyone's guess.
-    long hopefullyMidnight = utcMidnight - utcMidnightOffset;
-    // Then we determine the local TZ offset at that magical time.
-    long offsetAtHM = LOCAL_TIMEZONE.get().getOffset(hopefullyMidnight);
-    // If the offsets are the same, we assume our initial jump did not cross any DST boundaries,
-    // and is thus valid. Both times flowed at the same pace. We congratulate ourselves and bail.
-    if (utcMidnightOffset == offsetAtHM) return hopefullyMidnight;
-    // Alas, we crossed some DST boundary. If the time of day doesn't matter to the caller, we'll
-    // simply get the next day and go back half a day. This is not ideal but seems to work.
-    if (!doesTimeMatter) return daysToMillis(d + 1) - (MILLIS_PER_DAY >> 1);
-    // Now, we could get previous and next day, figure out how many hours were inserted or removed,
-    // and from which of the days, etc. But at this point our gun is pointing straight at our foot,
-    // so let's just go the safe, expensive way.
-    Calendar utc = UTC_CALENDAR.get(), local = LOCAL_CALENDAR.get();
-    utc.setTimeInMillis(utcMidnight);
-    local.set(utc.get(Calendar.YEAR), utc.get(Calendar.MONTH), utc.get(Calendar.DAY_OF_MONTH));
-    return local.getTimeInMillis();
-  }
-
-  public static int millisToDays(long millisLocal) {
-    // We assume millisLocal is midnight of some date. What we are basically trying to do
-    // here is go from local-midnight to UTC-midnight (or whatever time that happens to be).
-    long millisUtc = millisLocal + LOCAL_TIMEZONE.get().getOffset(millisLocal);
-    int days;
-    if (millisUtc >= 0L) {
-      days = (int) (millisUtc / MILLIS_PER_DAY);
-    } else {
-      days = (int) ((millisUtc - 86399999 /*(MILLIS_PER_DAY - 1)*/) / MILLIS_PER_DAY);
-    }
-    return days;
-  }
-
-  public static int dateToDays(Date d) {
-    // convert to equivalent time in UTC, then get day offset
-    long millisLocal = d.getTime();
-    return millisToDays(millisLocal);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    daysSinceEpoch = WritableUtils.readVInt(in);
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    WritableUtils.writeVInt(out, daysSinceEpoch);
-  }
-
-  @Override
-  public int compareTo(DateWritable d) {
-    return daysSinceEpoch - d.daysSinceEpoch;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (!(o instanceof DateWritable)) {
-      return false;
-    }
-    return compareTo((DateWritable) o) == 0;
-  }
-
-  @Override
-  public String toString() {
-    // For toString, the time does not matter
-    return get(false).toString();
-  }
-
-  @Override
-  public int hashCode() {
-    return daysSinceEpoch;
-  }
-}
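For context on the timezone gymnastics in the deleted daysToMillis/millisToDays pair: the day offset is converted to the epoch millis of local midnight by sampling the local offset at UTC midnight, and the inverse shifts back to UTC before dividing. A minimal standalone sketch of that round trip, assuming the simple case with no DST-boundary crossing (which the deleted code handles via its Calendar fallback):

import java.util.Date;
import java.util.TimeZone;

public class DaysEpochDemo {
  private static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000L;

  // Approximate local midnight for day d: UTC midnight shifted by the
  // local offset sampled at UTC midnight (no DST-boundary correction).
  static long daysToMillisApprox(int d) {
    long utcMidnight = d * MILLIS_PER_DAY;
    return utcMidnight - TimeZone.getDefault().getOffset(utcMidnight);
  }

  // Inverse: shift local millis to UTC, then floor-divide by a day.
  static int millisToDays(long millisLocal) {
    long millisUtc = millisLocal + TimeZone.getDefault().getOffset(millisLocal);
    return (int) Math.floorDiv(millisUtc, MILLIS_PER_DAY);
  }

  public static void main(String[] args) {
    int d = 17000;   // an arbitrary day offset from 1970-01-01
    long millis = daysToMillisApprox(d);
    System.out.println(new Date(millis) + " -> day " + millisToDays(millis));
  }
}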
diff --git a/java/storage-api/src/java/org/apache/hadoop/hive/serde2/io/HiveDecimalWritable.java b/java/storage-api/src/java/org/apache/hadoop/hive/serde2/io/HiveDecimalWritable.java
deleted file mode 100644
index 41452da..0000000
--- a/java/storage-api/src/java/org/apache/hadoop/hive/serde2/io/HiveDecimalWritable.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.serde2.io;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.math.BigInteger;
-
-import org.apache.hadoop.hive.common.type.HiveDecimal;
-
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.WritableUtils;
-
-public class HiveDecimalWritable implements WritableComparable<HiveDecimalWritable> {
-
-  private byte[] internalStorage = new byte[0];
-  private int scale;
-
-  public HiveDecimalWritable() {
-  }
-
-  public HiveDecimalWritable(String value) {
-    set(HiveDecimal.create(value));
-  }
-
-  public HiveDecimalWritable(byte[] bytes, int scale) {
-    set(bytes, scale);
-  }
-
-  public HiveDecimalWritable(HiveDecimalWritable writable) {
-    set(writable.getHiveDecimal());
-  }
-
-  public HiveDecimalWritable(HiveDecimal value) {
-    set(value);
-  }
-
-  public HiveDecimalWritable(long value) {
-    set(HiveDecimal.create(value));
-  }
-
-  public void set(HiveDecimal value) {
-    set(value.unscaledValue().toByteArray(), value.scale());
-  }
-
-  public void set(HiveDecimal value, int maxPrecision, int maxScale) {
-    set(HiveDecimal.enforcePrecisionScale(value, maxPrecision, maxScale));
-  }
-
-  public void set(HiveDecimalWritable writable) {
-    set(writable.getHiveDecimal());
-  }
-
-  public void set(byte[] bytes, int scale) {
-    this.internalStorage = bytes;
-    this.scale = scale;
-  }
-
-  public HiveDecimal getHiveDecimal() {
-    return HiveDecimal.create(new BigInteger(internalStorage), scale);
-  }
-
-  /**
-   * Get a HiveDecimal instance from the writable and constrain it to the given maximum precision/scale.
-   *
-   * @param maxPrecision maximum precision
-   * @param maxScale maximum scale
-   * @return HiveDecimal instance
-   */
-  public HiveDecimal getHiveDecimal(int maxPrecision, int maxScale) {
-    return HiveDecimal.enforcePrecisionScale(
-        HiveDecimal.create(new BigInteger(internalStorage), scale),
-        maxPrecision, maxScale);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    scale = WritableUtils.readVInt(in);
-    int byteArrayLen = WritableUtils.readVInt(in);
-    if (internalStorage.length != byteArrayLen) {
-      internalStorage = new byte[byteArrayLen];
-    }
-    in.readFully(internalStorage);
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    WritableUtils.writeVInt(out, scale);
-    WritableUtils.writeVInt(out, internalStorage.length);
-    out.write(internalStorage);
-  }
-
-  @Override
-  public int compareTo(HiveDecimalWritable that) {
-    return getHiveDecimal().compareTo(that.getHiveDecimal());
-  }
-
-  @Override
-  public String toString() {
-    return getHiveDecimal().toString();
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (this == other) {
-      return true;
-    }
-    if (other == null || getClass() != other.getClass()) {
-      return false;
-    }
-    HiveDecimalWritable bdw = (HiveDecimalWritable) other;
-
-    // 'equals' and 'compareTo' disagree for HiveDecimals. We want compareTo,
-    // which returns 0 iff the numbers are numerically equal (e.g. 3.14 is
-    // the same as 3.140). 'equals' only returns true when the values are
-    // equal and the scales match (e.g. 3.14 is not the same as 3.140).
-    return getHiveDecimal().compareTo(bdw.getHiveDecimal()) == 0;
-  }
-
-  @Override
-  public int hashCode() {
-    return getHiveDecimal().hashCode();
-  }
-
-  /* (non-Javadoc)
-   * In order to update a Decimal128 fast (w/o allocation) we need to expose access to the
-   * internal storage bytes and scale.
-   * @return the unscaled value as a big-endian two's-complement byte array
-   */
-  public byte[] getInternalStorage() {
-    return internalStorage;
-  }
-
-  /* (non-Javadoc)
-   * In order to update a Decimal128 fast (w/o allocation) we need to expose access to the
-   * internal storage bytes and scale.
-   */
-  public int getScale() {
-    return scale;
-  }
-
-  public static HiveDecimalWritable enforcePrecisionScale(
-      HiveDecimalWritable writable, int precision, int scale) {
-    if (writable == null) {
-      return null;
-    }
-
-    HiveDecimal dec =
-        HiveDecimal.enforcePrecisionScale(writable.getHiveDecimal(), precision,
-            scale);
-    return dec == null ? null : new HiveDecimalWritable(dec);
-  }
-}
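Two behaviors of the deleted HiveDecimalWritable are worth recording: the wire format is scale, then byte length, then the unscaled value as BigInteger.toByteArray() output (the real class encodes the two ints as Hadoop vints), and equality is deliberately scale-insensitive. A self-contained sketch of both using java.math.BigDecimal; DecimalWireDemo and the plain writeInt calls are illustrative stand-ins, not the storage-api code:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.BigInteger;

public class DecimalWireDemo {
  public static void main(String[] args) throws IOException {
    BigDecimal value = new BigDecimal("3.140");

    // Same field order as the writable: scale, byte length, unscaled bytes.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    byte[] unscaled = value.unscaledValue().toByteArray();
    out.writeInt(value.scale());
    out.writeInt(unscaled.length);
    out.write(unscaled);

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    int scale = in.readInt();
    byte[] buf = new byte[in.readInt()];
    in.readFully(buf);
    BigDecimal roundTrip = new BigDecimal(new BigInteger(buf), scale);

    // Scale-insensitive equality, mirroring the writable's equals():
    System.out.println(roundTrip.compareTo(new BigDecimal("3.14")) == 0); // true
    System.out.println(roundTrip.equals(new BigDecimal("3.14")));         // false
  }
}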
diff --git a/java/storage-api/src/java/org/apache/hive/common/util/IntervalDayTimeUtils.java b/java/storage-api/src/java/org/apache/hive/common/util/IntervalDayTimeUtils.java
deleted file mode 100644
index 727c1e6..0000000
--- a/java/storage-api/src/java/org/apache/hive/common/util/IntervalDayTimeUtils.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hive.common.util;
-
-import java.math.BigDecimal;
-import java.text.SimpleDateFormat;
-
-import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
-
-
-/**
- * Thread-safe utilities for HiveIntervalDayTime values.
- */
-public class IntervalDayTimeUtils {
-
-  private static final ThreadLocal<SimpleDateFormat> dateFormatLocal = new ThreadLocal<SimpleDateFormat>() {
-    @Override
-    protected SimpleDateFormat initialValue() {
-      return new SimpleDateFormat("yyyy-MM-dd");
-    }
-  };
-
-  public static SimpleDateFormat getDateFormat() {
-    return dateFormatLocal.get();
-  }
-
-  public static final int NANOS_PER_SEC = 1000000000;
-  public static final BigDecimal MAX_INT_BD = new BigDecimal(Integer.MAX_VALUE);
-  public static final BigDecimal NANOS_PER_SEC_BD = new BigDecimal(NANOS_PER_SEC);
-
-  public static int parseNumericValueWithRange(String fieldName,
-      String strVal, int minValue, int maxValue) throws IllegalArgumentException {
-    int result = 0;
-    if (strVal != null) {
-      result = Integer.parseInt(strVal);
-      if (result < minValue || result > maxValue) {
-        throw new IllegalArgumentException(String.format("%s value %d outside range [%d, %d]",
-            fieldName, result, minValue, maxValue));
-      }
-    }
-    return result;
-  }
-
-  public static long getIntervalDayTimeTotalNanos(HiveIntervalDayTime intervalDayTime) {
-    return intervalDayTime.getTotalSeconds() * NANOS_PER_SEC + intervalDayTime.getNanos();
-  }
-
-  public static void setIntervalDayTimeTotalNanos(HiveIntervalDayTime intervalDayTime,
-      long totalNanos) {
-    intervalDayTime.set(totalNanos / NANOS_PER_SEC, (int) (totalNanos % NANOS_PER_SEC));
-  }
-
-  public static long getIntervalDayTimeTotalSecondsFromTotalNanos(long totalNanos) {
-    return totalNanos / NANOS_PER_SEC;
-  }
-
-  public static int getIntervalDayTimeNanosFromTotalNanos(long totalNanos) {
-    return (int) (totalNanos % NANOS_PER_SEC);
-  }
-}
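The deleted interval helpers pack a (seconds, nanos) pair into one signed nanosecond total and back with plain division and remainder. A standalone sketch of the same arithmetic on a long/int pair (no HiveIntervalDayTime dependency):

public class IntervalNanosDemo {
  static final long NANOS_PER_SEC = 1_000_000_000L;

  public static void main(String[] args) {
    long seconds = 90061;        // 1 day, 1 hour, 1 minute, 1 second
    int nanos = 500_000_000;

    long totalNanos = seconds * NANOS_PER_SEC + nanos;

    // Decompose the way setIntervalDayTimeTotalNanos does; for negative
    // totals, Java's truncating / and % keep both parts the same sign.
    System.out.println(totalNanos / NANOS_PER_SEC);          // 90061
    System.out.println((int) (totalNanos % NANOS_PER_SEC));  // 500000000
  }
}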
diff --git a/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestListColumnVector.java b/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestListColumnVector.java
deleted file mode 100644
index 395d8f5..0000000
--- a/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestListColumnVector.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import org.junit.Test;
-
-import java.util.Arrays;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for ListColumnVector
- */
-public class TestListColumnVector {
-
-  @Test
-  public void testFlatten() throws Exception {
-    LongColumnVector col1 = new LongColumnVector(10);
-    ListColumnVector vector = new ListColumnVector(10, col1);
-    vector.init();
-
-    // TEST - repeating NULL & no selection
-    col1.isRepeating = true;
-    vector.isRepeating = true;
-    vector.noNulls = false;
-    vector.isNull[0] = true;
-    vector.childCount = 0;
-    for(int i=0; i < 10; ++i) {
-      col1.vector[i] = i + 3;
-      vector.offsets[i] = i;
-      vector.lengths[i] = 10 + i;
-    }
-    vector.flatten(false, null, 10);
-    // make sure the vector was flattened
-    assertFalse(vector.isRepeating);
-    assertFalse(vector.noNulls);
-    // child isn't flattened, because parent is repeating null
-    assertTrue(col1.isRepeating);
-    assertTrue(col1.noNulls);
-    for(int i=0; i < 10; ++i) {
-      assertTrue("isNull at " + i, vector.isNull[i]);
-    }
-    for(int i=0; i < 10; ++i) {
-      StringBuilder buf = new StringBuilder();
-      vector.stringifyValue(buf, i);
-      assertEquals("null", buf.toString());
-    }
-    vector.unFlatten();
-    assertTrue(col1.isRepeating);
-    assertTrue(vector.isRepeating);
-
-    // TEST - repeating NULL & selection
-    Arrays.fill(vector.isNull, 1, 10, false);
-    int[] sel = new int[]{3, 5, 7};
-    vector.flatten(true, sel, 3);
-    for(int i=1; i < 10; i++) {
-      assertEquals("failure at " + i,
-          i == 3 || i == 5 || i == 7, vector.isNull[i]);
-    }
-    vector.unFlatten();
-
-    // TEST - repeating non-NULL & no-selection
-    vector.noNulls = true;
-    vector.isRepeating = true;
-    vector.offsets[0] = 0;
-    vector.lengths[0] = 3;
-    vector.childCount = 3;
-    vector.flatten(false, null, 10);
-    // make sure the vector was flattened
-    assertFalse(vector.isRepeating);
-    assertFalse(vector.noNulls);
-    assertFalse(col1.isRepeating);
-    assertFalse(col1.noNulls);
-    for(int i=0; i < 10; ++i) {
-      assertEquals("offset at " + i, 0, vector.offsets[i]);
-      assertEquals("length at " + i, 3, vector.lengths[i]);
-    }
-    for(int i=0; i < 10; ++i) {
-      StringBuilder buf = new StringBuilder();
-      vector.stringifyValue(buf, i);
-      assertEquals("[3, 3, 3]", buf.toString());
-    }
-    vector.unFlatten();
-    assertTrue(col1.isRepeating);
-    assertTrue(col1.noNulls);
-    assertTrue(vector.isRepeating);
-    assertTrue(vector.noNulls);
-
-    // TEST - repeating non-NULL & selection
-    Arrays.fill(vector.offsets, 1, 10, -1);
-    Arrays.fill(vector.lengths, 1, 10, -1);
-    Arrays.fill(col1.vector, 1, 10, -1);
-    vector.flatten(true, sel, 3);
-    for(int i=1; i < 10; i++) {
-      if (i == 3 || i == 5 || i == 7) {
-        assertEquals("failure at " + i, 0, vector.offsets[i]);
-        assertEquals("failure at " + i, 3, vector.lengths[i]);
-      } else {
-        assertEquals("failure at " + i, -1, vector.offsets[i]);
-        assertEquals("failure at " + i, -1, vector.lengths[i]);
-      }
-    }
-    for(int i=0; i < 3; ++i) {
-      assertEquals("failure at " + i, 3, col1.vector[i]);
-    }
-    for(int i=3; i < 10; ++i) {
-      assertEquals("failure at " + i, -1, col1.vector[i]);
-    }
-    vector.unFlatten();
-
-    // TEST - reset
-    vector.reset();
-    assertFalse(col1.isRepeating);
-    assertTrue(col1.noNulls);
-    assertFalse(vector.isRepeating);
-    assertTrue(vector.noNulls);
-    assertEquals(0, vector.childCount);
-  }
-
-  @Test
-  public void testSet() throws Exception {
-    LongColumnVector input1 = new LongColumnVector(10);
-    ListColumnVector input = new ListColumnVector(10, input1);
-    input.init();
-    LongColumnVector output1 = new LongColumnVector(30);
-    ListColumnVector output = new ListColumnVector(10, output1);
-    output.init();
-    input.noNulls = false;
-    input.isNull[6] = true;
-    input.childCount = 11;
-    Arrays.fill(output1.vector, -1);
-    for(int i=0; i < 10; ++i) {
-      input1.vector[i] = 10 * i;
-      input.offsets[i] = i;
-      input.lengths[i] = 2;
-      output.offsets[i] = i + 2;
-      output.lengths[i] = 3;
-    }
-    output.childCount = 30;
-
-    // copy a null
-    output.setElement(3, 6, input);
-    assertEquals(30, output.childCount);
-    StringBuilder buf = new StringBuilder();
-    output.stringifyValue(buf, 3);
-    assertEquals("null", buf.toString());
-
-    // copy a value
-    output.setElement(3, 5, input);
-    assertEquals(30, output.offsets[3]);
-    assertEquals(2, output.lengths[3]);
-    assertEquals(32, output.childCount);
-    buf = new StringBuilder();
-    output.stringifyValue(buf, 3);
-    assertEquals("[50, 60]", buf.toString());
-
-    // overwrite a value
-    output.setElement(3, 4, input);
-    assertEquals(34, output.childCount);
-    assertEquals(34, output1.vector.length);
-    assertEquals(50, output1.vector[30]);
-    assertEquals(60, output1.vector[31]);
-    buf = new StringBuilder();
-    output.stringifyValue(buf, 3);
-    assertEquals("[40, 50]", buf.toString());
-
-    input.reset();
-    assertFalse(input1.isRepeating);
-    assertTrue(input.noNulls);
-    output.reset();
-    assertEquals(0, output.childCount);
-
-    input.isRepeating = true;
-    input.offsets[0] = 0;
-    input.lengths[0] = 10;
-    output.setElement(2, 7, input);
-    assertEquals(10, output.childCount);
-    buf = new StringBuilder();
-    output.stringifyValue(buf, 2);
-    assertEquals("[0, 10, 20, 30, 40, 50, 60, 70, 80, 90]", buf.toString());
-  }
-}
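The test above pokes at ListColumnVector's packed representation directly: row i is the slice child[offsets[i] .. offsets[i]+lengths[i]) of one shared child vector, with childCount tracking how much of the child is in use. A plain-array illustration of that layout (hypothetical names, not the storage-api types):

public class ListLayoutDemo {
  public static void main(String[] args) {
    // Three logical rows, [3, 4], [5] and [6, 7, 8], packed into one child.
    long[] child = {3, 4, 5, 6, 7, 8};
    long[] offsets = {0, 2, 3};
    long[] lengths = {2, 1, 3};

    for (int row = 0; row < offsets.length; ++row) {
      StringBuilder buf = new StringBuilder("[");
      for (long j = offsets[row]; j < offsets[row] + lengths[row]; ++j) {
        if (j != offsets[row]) {
          buf.append(", ");
        }
        buf.append(child[(int) j]);
      }
      System.out.println(buf.append(']'));  // [3, 4] then [5] then [6, 7, 8]
    }
  }
}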
diff --git a/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestMapColumnVector.java b/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestMapColumnVector.java
deleted file mode 100644
index c77c286..0000000
--- a/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestMapColumnVector.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import org.junit.Test;
-
-import java.util.Arrays;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for MapColumnVector
- */
-public class TestMapColumnVector {
-
-  @Test
-  public void testFlatten() throws Exception {
-    LongColumnVector col1 = new LongColumnVector(10);
-    DoubleColumnVector col2 = new DoubleColumnVector(10);
-    MapColumnVector vector = new MapColumnVector(10, col1, col2);
-    vector.init();
-
-    // TEST - repeating NULL & no selection
-    col1.isRepeating = true;
-    vector.isRepeating = true;
-    vector.noNulls = false;
-    vector.isNull[0] = true;
-    vector.childCount = 0;
-    for(int i=0; i < 10; ++i) {
-      col1.vector[i] = i + 3;
-      col2.vector[i] = i * 10;
-      vector.offsets[i] = i;
-      vector.lengths[i] = 10 + i;
-    }
-    vector.flatten(false, null, 10);
-    // make sure the vector was flattened
-    assertFalse(vector.isRepeating);
-    assertFalse(vector.noNulls);
-    // child isn't flattened, because parent is repeating null
-    assertTrue(col1.isRepeating);
-    assertTrue(col1.noNulls);
-    for(int i=0; i < 10; ++i) {
-      assertTrue("isNull at " + i, vector.isNull[i]);
-    }
-    for(int i=0; i < 10; ++i) {
-      StringBuilder buf = new StringBuilder();
-      vector.stringifyValue(buf, i);
-      assertEquals("null", buf.toString());
-    }
-    vector.unFlatten();
-    assertTrue(col1.isRepeating);
-    assertTrue(vector.isRepeating);
-
-    // TEST - repeating NULL & selection
-    Arrays.fill(vector.isNull, 1, 10, false);
-    int[] sel = new int[]{3, 5, 7};
-    vector.flatten(true, sel, 3);
-    for(int i=1; i < 10; i++) {
-      assertEquals("failure at " + i,
-          i == 3 || i == 5 || i == 7, vector.isNull[i]);
-    }
-    vector.unFlatten();
-
-    // TEST - repeating non-NULL & no-selection
-    vector.noNulls = true;
-    vector.isRepeating = true;
-    vector.offsets[0] = 0;
-    vector.lengths[0] = 3;
-    vector.childCount = 3;
-    vector.flatten(false, null, 10);
-    // make sure the vector was flattened
-    assertFalse(vector.isRepeating);
-    assertFalse(vector.noNulls);
-    assertFalse(col1.isRepeating);
-    assertFalse(col1.noNulls);
-    assertFalse(col2.isRepeating);
-    assertFalse(col2.noNulls);
-    for(int i=0; i < 10; ++i) {
-      assertEquals("offset at " + i, 0, vector.offsets[i]);
-      assertEquals("length at " + i, 3, vector.lengths[i]);
-    }
-    for(int i=0; i < 10; ++i) {
-      StringBuilder buf = new StringBuilder();
-      vector.stringifyValue(buf, i);
-      assertEquals("[{\"key\": 3, \"value\": 0.0}," +
-          " {\"key\": 3, \"value\": 10.0}," +
-          " {\"key\": 3, \"value\": 20.0}]", buf.toString());
-    }
-    vector.unFlatten();
-    assertTrue(col1.isRepeating);
-    assertTrue(col1.noNulls);
-    assertTrue(vector.isRepeating);
-    assertFalse(col2.isRepeating);
-    assertTrue(col2.noNulls);
-    assertTrue(vector.noNulls);
-
-    // TEST - repeating non-NULL & selection
-    Arrays.fill(vector.offsets, 1, 10, -1);
-    Arrays.fill(vector.lengths, 1, 10, -1);
-    Arrays.fill(col1.vector, 1, 10, -1);
-    vector.flatten(true, sel, 3);
-    for(int i=1; i < 10; i++) {
-      if (i == 3 || i == 5 || i == 7) {
-        assertEquals("failure at " + i, 0, vector.offsets[i]);
-        assertEquals("failure at " + i, 3, vector.lengths[i]);
-      } else {
-        assertEquals("failure at " + i, -1, vector.offsets[i]);
-        assertEquals("failure at " + i, -1, vector.lengths[i]);
-      }
-    }
-    for(int i=0; i < 3; ++i) {
-      assertEquals("failure at " + i, 3, col1.vector[i]);
-    }
-    for(int i=3; i < 10; ++i) {
-      assertEquals("failure at " + i, -1, col1.vector[i]);
-    }
-    vector.unFlatten();
-
-    // TEST - reset
-    vector.reset();
-    assertFalse(col1.isRepeating);
-    assertTrue(col1.noNulls);
-    assertFalse(col2.isRepeating);
-    assertTrue(col2.noNulls);
-    assertFalse(vector.isRepeating);
-    assertTrue(vector.noNulls);
-    assertEquals(0, vector.childCount);
-  }
-
-  @Test
-  public void testSet() throws Exception {
-    LongColumnVector input1 = new LongColumnVector(10);
-    DoubleColumnVector input2 = new DoubleColumnVector(10);
-    MapColumnVector input = new MapColumnVector(10, input1, input2);
-    input.init();
-    LongColumnVector output1 = new LongColumnVector(30);
-    DoubleColumnVector output2 = new DoubleColumnVector(30);
-    MapColumnVector output = new MapColumnVector(10, output1, output2);
-    output.init();
-    input.noNulls = false;
-    input.isNull[6] = true;
-    input.childCount = 11;
-    Arrays.fill(output1.vector, -1);
-    for(int i=0; i < 10; ++i) {
-      input1.vector[i] = 10 * i;
-      input2.vector[i] = 100 * i;
-      input.offsets[i] = i;
-      input.lengths[i] = 2;
-      output.offsets[i] = i + 2;
-      output.lengths[i] = 3;
-    }
-    output.childCount = 30;
-
-    // copy a null
-    output.setElement(3, 6, input);
-    assertEquals(30, output.childCount);
-    StringBuilder buf = new StringBuilder();
-    output.stringifyValue(buf, 3);
-    assertEquals("null", buf.toString());
-
-    // copy a value
-    output.setElement(3, 5, input);
-    assertEquals(30, output.offsets[3]);
-    assertEquals(2, output.lengths[3]);
-    assertEquals(32, output.childCount);
-    buf = new StringBuilder();
-    output.stringifyValue(buf, 3);
-    assertEquals("[{\"key\": 50, \"value\": 500.0}," +
-        " {\"key\": 60, \"value\": 600.0}]", buf.toString());
-
-    // overwrite a value
-    output.setElement(3, 4, input);
-    assertEquals(34, output.childCount);
-    assertEquals(34, output1.vector.length);
-    assertEquals(50, output1.vector[30]);
-    assertEquals(60, output1.vector[31]);
-    buf = new StringBuilder();
-    output.stringifyValue(buf, 3);
-    assertEquals("[{\"key\": 40, \"value\": 400.0}," +
-        " {\"key\": 50, \"value\": 500.0}]", buf.toString());
-
-    input.reset();
-    assertFalse(input1.isRepeating);
-    assertTrue(input.noNulls);
-    output.reset();
-    assertEquals(0, output.childCount);
-
-    input.isRepeating = true;
-    input.offsets[0] = 0;
-    input.lengths[0] = 10;
-    output.setElement(2, 7, input);
-    assertEquals(10, output.childCount);
-    buf = new StringBuilder();
-    output.stringifyValue(buf, 2);
-    assertEquals("[{\"key\": 0, \"value\": 0.0}," +
-        " {\"key\": 10, \"value\": 100.0}," +
-        " {\"key\": 20, \"value\": 200.0}," +
-        " {\"key\": 30, \"value\": 300.0}," +
-        " {\"key\": 40, \"value\": 400.0}," +
-        " {\"key\": 50, \"value\": 500.0}," +
-        " {\"key\": 60, \"value\": 600.0}," +
-        " {\"key\": 70, \"value\": 700.0}," +
-        " {\"key\": 80, \"value\": 800.0}," +
-        " {\"key\": 90, \"value\": 900.0}]", buf.toString());
-  }
-}
diff --git a/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestStructColumnVector.java b/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestStructColumnVector.java
deleted file mode 100644
index 9ac7ba0..0000000
--- a/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestStructColumnVector.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import org.junit.Test;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.sql.Timestamp;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for StructColumnVector
- */
-public class TestStructColumnVector {
-
-  @Test
-  public void testFlatten() throws Exception {
-    LongColumnVector col1 = new LongColumnVector(10);
-    LongColumnVector col2 = new LongColumnVector(10);
-    StructColumnVector vector = new StructColumnVector(10, col1, col2);
-    vector.init();
-    col1.isRepeating = true;
-    for(int i=0; i < 10; ++i) {
-      col1.vector[i] = i;
-      col2.vector[i] = 2 * i;
-    }
-    vector.flatten(false, null, 10);
-    assertFalse(col1.isRepeating);
-    for(int i=0; i < 10; ++i) {
-      assertEquals("col1 at " + i, 0, col1.vector[i]);
-      assertEquals("col2 at " + i, 2 * i, col2.vector[i]);
-    }
-    vector.unFlatten();
-    assertTrue(col1.isRepeating);
-    for(int i=0; i < 10; ++i) {
-      StringBuilder buf = new StringBuilder();
-      vector.stringifyValue(buf, i);
-      assertEquals("[0, " + (2 * i) + "]", buf.toString());
-    }
-    vector.reset();
-    assertFalse(col1.isRepeating);
-  }
-
-  @Test
-  public void testSet() throws Exception {
-    LongColumnVector input1 = new LongColumnVector(10);
-    LongColumnVector input2 = new LongColumnVector(10);
-    StructColumnVector input = new StructColumnVector(10, input1, input2);
-    input.init();
-    LongColumnVector output1 = new LongColumnVector(10);
-    LongColumnVector output2 = new LongColumnVector(10);
-    StructColumnVector output = new StructColumnVector(10, output1, output2);
-    output.init();
-    input1.isRepeating = true;
-    input2.noNulls = false;
-    input2.isNull[5] = true;
-    input.noNulls = false;
-    input.isNull[6] = true;
-    for(int i=0; i < 10; ++i) {
-      input1.vector[i] = i + 1;
-      input2.vector[i] = i + 2;
-    }
-    output.setElement(3, 6, input);
-    StringBuilder buf = new StringBuilder();
-    output.stringifyValue(buf, 3);
-    assertEquals("null", buf.toString());
-    output.setElement(3, 5, input);
-    buf = new StringBuilder();
-    output.stringifyValue(buf, 3);
-    assertEquals("[1, null]", buf.toString());
-    output.setElement(3, 4, input);
-    buf = new StringBuilder();
-    output.stringifyValue(buf, 3);
-    assertEquals("[1, 6]", buf.toString());
-    input.reset();
-    assertFalse(input1.isRepeating);
-    assertTrue(input.noNulls);
-  }
-
-  @Test
-  public void testStringify() throws IOException {
-    VectorizedRowBatch batch = new VectorizedRowBatch(2);
-    LongColumnVector x1 = new LongColumnVector();
-    TimestampColumnVector x2 = new TimestampColumnVector();
-    StructColumnVector x = new StructColumnVector(1024, x1, x2);
-    BytesColumnVector y = new BytesColumnVector();
-    batch.cols[0] = x;
-    batch.cols[1] = y;
-    batch.reset();
-    Timestamp ts = Timestamp.valueOf("2000-01-01 00:00:00");
-    for(int r=0; r < 10; ++r) {
-      batch.size += 1;
-      x1.vector[r] = 3 * r;
-      ts.setTime(ts.getTime() + 1000);
-      x2.set(r, ts);
-      byte[] buffer = ("value " + r).getBytes(StandardCharsets.UTF_8);
-      y.setRef(r, buffer, 0, buffer.length);
-    }
-    final String EXPECTED = ("[[0, 2000-01-01 00:00:01.0], \"value 0\"]\n" +
-        "[[3, 2000-01-01 00:00:02.0], \"value 1\"]\n" +
-        "[[6, 2000-01-01 00:00:03.0], \"value 2\"]\n" +
-        "[[9, 2000-01-01 00:00:04.0], \"value 3\"]\n" +
-        "[[12, 2000-01-01 00:00:05.0], \"value 4\"]\n" +
-        "[[15, 2000-01-01 00:00:06.0], \"value 5\"]\n" +
-        "[[18, 2000-01-01 00:00:07.0], \"value 6\"]\n" +
-        "[[21, 2000-01-01 00:00:08.0], \"value 7\"]\n" +
-        "[[24, 2000-01-01 00:00:09.0], \"value 8\"]\n" +
-        "[[27, 2000-01-01 00:00:10.0], \"value 9\"]");
-    assertEquals(EXPECTED, batch.toString());
-  }
-}
diff --git a/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestTimestampColumnVector.java b/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestTimestampColumnVector.java
deleted file mode 100644
index 6e5d5c8..0000000
--- a/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestTimestampColumnVector.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import org.junit.Test;
-
-import java.io.PrintWriter;
-import java.math.BigDecimal;
-import java.math.RoundingMode;
-import java.sql.Timestamp;
-import java.util.Date;
-import java.util.Random;
-
-import org.apache.hadoop.hive.common.type.RandomTypeUtil;
-
-import static org.junit.Assert.*;
-
-/**
- * Test for TimestampColumnVector
- */
-public class TestTimestampColumnVector {
-
-  private static int TEST_COUNT = 5000;
-
-  private static int fake = 0;
-
-  @Test
-  public void testSaveAndRetrieve() throws Exception {
-
-    Random r = new Random(1234);
-    TimestampColumnVector timestampColVector = new TimestampColumnVector();
-    Timestamp[] randTimestamps = new Timestamp[VectorizedRowBatch.DEFAULT_SIZE];
-
-    for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE; i++) {
-      Timestamp randTimestamp = RandomTypeUtil.getRandTimestamp(r);
-      randTimestamps[i] = randTimestamp;
-      timestampColVector.set(i, randTimestamp);
-    }
-    for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE; i++) {
-      Timestamp retrievedTimestamp = timestampColVector.asScratchTimestamp(i);
-      assertEquals(randTimestamps[i], retrievedTimestamp);
-    }
-  }
-
-  @Test
-  public void testTimestampCompare() throws Exception {
-    Random r = new Random(1234);
-    TimestampColumnVector timestampColVector = new TimestampColumnVector();
-    Timestamp[] randTimestamps = new Timestamp[VectorizedRowBatch.DEFAULT_SIZE];
-    Timestamp[] candTimestamps = new Timestamp[VectorizedRowBatch.DEFAULT_SIZE];
-    int[] compareToLeftRights = new int[VectorizedRowBatch.DEFAULT_SIZE];
-    int[] compareToRightLefts = new int[VectorizedRowBatch.DEFAULT_SIZE];
-
-    for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE; i++) {
-      Timestamp randTimestamp = RandomTypeUtil.getRandTimestamp(r);
-      randTimestamps[i] = randTimestamp;
-      timestampColVector.set(i, randTimestamp);
-      Timestamp candTimestamp = RandomTypeUtil.getRandTimestamp(r);
-      candTimestamps[i] = candTimestamp;
-      compareToLeftRights[i] = candTimestamp.compareTo(randTimestamp);
-      compareToRightLefts[i] = randTimestamp.compareTo(candTimestamp);
-    }
-
-    for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE; i++) {
-      Timestamp retrievedTimestamp = timestampColVector.asScratchTimestamp(i);
-      assertEquals(randTimestamps[i], retrievedTimestamp);
-      Timestamp candTimestamp = candTimestamps[i];
-      assertEquals(compareToLeftRights[i],
-          timestampColVector.compareTo(candTimestamp, i));
-      assertEquals(compareToRightLefts[i],
-          timestampColVector.compareTo(i, candTimestamp));
-    }
-  }
-
-  /*
-  @Test
-  public void testGenerate() throws Exception {
-    PrintWriter writer = new PrintWriter("/Users/you/timestamps.txt");
-    Random r = new Random(18485);
-    for (int i = 0; i < 25; i++) {
-      Timestamp randTimestamp = RandomTypeUtil.getRandTimestamp(r);
-      writer.println(randTimestamp.toString());
-    }
-    for (int i = 0; i < 25; i++) {
-      Timestamp randTimestamp = RandomTypeUtil.getRandTimestamp(r, 1965, 2025);
-      writer.println(randTimestamp.toString());
-    }
-    writer.close();
-  }
-  */
-}
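These tests only go through set/asScratchTimestamp/compareTo, but the underlying column stores each row as an epoch-millis value plus a full sub-second nanos value. A small sketch of that decomposition using java.sql.Timestamp alone (the split shown is an assumption about the representation, not code from the deleted class):

import java.sql.Timestamp;

public class TimestampPairDemo {
  public static void main(String[] args) {
    Timestamp ts = Timestamp.valueOf("2000-01-01 00:00:00.123456789");

    // Split into the millis/nanos pair a columnar store would keep per row.
    long millis = ts.getTime();  // epoch millis, truncated to milliseconds
    int nanos = ts.getNanos();   // full sub-second nanos, 0..999999999

    // Rebuild and verify the round trip.
    Timestamp back = new Timestamp(millis);
    back.setNanos(nanos);
    System.out.println(back.equals(ts));  // true
  }
}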
diff --git a/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestUnionColumnVector.java b/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestUnionColumnVector.java
deleted file mode 100644
index c378cd4..0000000
--- a/java/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestUnionColumnVector.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector;
-
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for UnionColumnVector
- */
-public class TestUnionColumnVector {
-
-  @Test
-  public void testFlatten() throws Exception {
-    LongColumnVector col1 = new LongColumnVector(10);
-    LongColumnVector col2 = new LongColumnVector(10);
-    UnionColumnVector vector = new UnionColumnVector(10, col1, col2);
-    vector.init();
-    col1.isRepeating = true;
-    for(int i=0; i < 10; ++i) {
-      vector.tags[i] = i % 2;
-      col1.vector[i] = i;
-      col2.vector[i] = 2 * i;
-    }
-    vector.flatten(false, null, 10);
-    assertFalse(col1.isRepeating);
-    for(int i=0; i < 10; ++i) {
-      assertEquals(i % 2, vector.tags[i]);
-      assertEquals("col1 at " + i, 0, col1.vector[i]);
-      assertEquals("col2 at " + i, 2 * i, col2.vector[i]);
-    }
-    vector.unFlatten();
-    assertTrue(col1.isRepeating);
-    for(int i=0; i < 10; ++i) {
-      StringBuilder buf = new StringBuilder();
-      vector.stringifyValue(buf, i);
-      assertEquals("{\"tag\": " + (i % 2) + ", \"value\": " +
-          (i % 2 == 0 ? 0 : 2 * i) + "}", buf.toString());
-    }
-    vector.reset();
-    assertFalse(col1.isRepeating);
-  }
-
-  @Test
-  public void testSet() throws Exception {
-    LongColumnVector input1 = new LongColumnVector(10);
-    LongColumnVector input2 = new LongColumnVector(10);
-    UnionColumnVector input = new UnionColumnVector(10, input1, input2);
-    input.init();
-    LongColumnVector output1 = new LongColumnVector(10);
-    LongColumnVector output2 = new LongColumnVector(10);
-    UnionColumnVector output = new UnionColumnVector(10, output1, output2);
-    output.init();
-    input1.isRepeating = true;
-    for(int i=0; i < 10; ++i) {
-      input.tags[i] = i % 2;
-      input1.vector[i] = i + 1;
-      input2.vector[i] = i + 2;
-    }
-    output.setElement(3, 4, input);
-    StringBuilder buf = new StringBuilder();
-    output.stringifyValue(buf, 3);
-    assertEquals("{\"tag\": 0, \"value\": 1}", buf.toString());
-    input.noNulls = false;
-    input.isNull[5] = true;
-    output.setElement(3, 5, input);
-    buf = new StringBuilder();
-    output.stringifyValue(buf, 3);
-    assertEquals("null", buf.toString());
-    input.reset();
-    assertFalse(input1.isRepeating);
-    assertTrue(input.noNulls);
-  }
-}
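In the union vector exercised above, tags[row] picks which child column supplies the row's value and the other children are ignored for that row. A plain-array illustration of that dispatch (illustrative names only):

public class UnionTagDemo {
  public static void main(String[] args) {
    long[] child0 = {1, 0, 3, 0, 5};
    long[] child1 = {0, 20, 0, 40, 0};
    int[] tags = {0, 1, 0, 1, 0};

    for (int row = 0; row < tags.length; ++row) {
      long value = tags[row] == 0 ? child0[row] : child1[row];
      System.out.println("{\"tag\": " + tags[row] + ", \"value\": " + value + "}");
    }
  }
}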
diff --git a/java/storage-api/src/test/org/apache/hadoop/hive/ql/util/JavaDataModelTest.java b/java/storage-api/src/test/org/apache/hadoop/hive/ql/util/JavaDataModelTest.java
deleted file mode 100644
index 7cd2e12..0000000
--- a/java/storage-api/src/test/org/apache/hadoop/hive/ql/util/JavaDataModelTest.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.util;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-
-public final class JavaDataModelTest {
-
-  private static final String DATA_MODEL_PROPERTY = "sun.arch.data.model";
-
-  private String previousModelSetting;
-
-  @Before
-  public void setUp() throws Exception {
-    previousModelSetting = System.getProperty(DATA_MODEL_PROPERTY);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (previousModelSetting != null) {
-      System.setProperty(DATA_MODEL_PROPERTY, previousModelSetting);
-    } else {
-      System.clearProperty(DATA_MODEL_PROPERTY);
-    }
-  }
-
-  @Test
-  public void testGetDoesNotReturnNull() throws Exception {
-    JavaDataModel model = JavaDataModel.get();
-    assertNotNull(model);
-  }
-
-  @Test
-  public void testGetModelForSystemWhenSetTo32() throws Exception {
-    System.setProperty(DATA_MODEL_PROPERTY, "32");
-    assertSame(JavaDataModel.JAVA32, JavaDataModel.getModelForSystem());
-  }
-
-  @Test
-  public void testGetModelForSystemWhenSetTo64() throws Exception {
-    System.setProperty(DATA_MODEL_PROPERTY, "64");
-    assertSame(JavaDataModel.JAVA64, JavaDataModel.getModelForSystem());
-  }
-
-  @Test
-  public void testGetModelForSystemWhenSetToUnknown() throws Exception {
-    System.setProperty(DATA_MODEL_PROPERTY, "unknown");
-    assertSame(JavaDataModel.JAVA64, JavaDataModel.getModelForSystem());
-  }
-
-  @Test
-  public void testGetModelForSystemWhenUndefined() throws Exception {
-    System.clearProperty(DATA_MODEL_PROPERTY);
-    assertSame(JavaDataModel.JAVA64, JavaDataModel.getModelForSystem());
-  }
-}
\ No newline at end of file
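For reference, the contract the deleted test pins down: JavaDataModel.getModelForSystem() reads sun.arch.data.model and falls back to the 64-bit model for unknown or unset values. A standalone sketch of that selection logic (the enum here echoes the test's constants and is not the JavaDataModel source):

public class DataModelDemo {
  enum Model { JAVA32, JAVA64 }

  static Model getModelForSystem() {
    String prop = System.getProperty("sun.arch.data.model");
    // "32" selects the 32-bit model; anything else, including null, maps
    // to the 64-bit default.
    return "32".equals(prop) ? Model.JAVA32 : Model.JAVA64;
  }

  public static void main(String[] args) {
    System.setProperty("sun.arch.data.model", "unknown");
    System.out.println(getModelForSystem());  // JAVA64
  }
}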