HCAT-493 Convert classes with 2 space indentation to 4 space indentation for consistent style

git-svn-id: https://svn.apache.org/repos/asf/incubator/hcatalog/trunk@1383152 13f79535-47bb-0310-9956-ffa450edef68
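
For context, the 4-space convention introduced here can be checked mechanically by
Checkstyle's Indentation check, which this patch also adds to coding_style.xml (see the
hunk below, where only caseIndent is configured). As a minimal, hypothetical sketch
(assuming the check's standard basicOffset and caseIndent properties), such a rule sits
inside the TreeWalker module:

    <!-- Illustration only: enforce 4-space block indentation and keep
         case labels flush with the enclosing switch statement. -->
    <module name="Indentation">
        <property name="basicOffset" value="4"/>
        <property name="caseIndent" value="0"/>
    </module>
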
diff --git a/CHANGES.txt b/CHANGES.txt
index 0025076..cfe404a 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -38,6 +38,8 @@
   HCAT-427 Document storage-based authorization (lefty via gates)
 
   IMPROVEMENTS
+  HCAT-493 Convert classes with 2 space indentation to 4 space indentation for consistent style (amalakar via traviscrawford)
+
   HCAT-489 HCatalog style cleanups and readd javac debug option (traviscrawford)
 
   HCAT-431 document hcat type to java class/pig type mapping (lefty via khorgath)
diff --git a/build.xml b/build.xml
index 76b8cb4..b7fc8a1 100644
--- a/build.xml
+++ b/build.xml
@@ -21,451 +21,452 @@
          xmlns:ivy="antlib:org.apache.ivy.ant"
          xmlns:artifact="artifact:org.apache.maven.artifact.ant">
 
-  <property name="path.to.basedir" location="${basedir}"/>
+    <property name="path.to.basedir" location="${basedir}"/>
 
-  <loadproperties srcfile="${basedir}/build.properties"/>
+    <loadproperties srcfile="${basedir}/build.properties"/>
 
-  <!--  
-  ================================================================================
-  Imports
-  ================================================================================ -->
+    <!--
+    ================================================================================
+    Imports
+    ================================================================================ -->
 
-  <!--
-  ================================================================================
-  Properties and Classpaths Section
-  ================================================================================
-  -->
+    <!--
+    ================================================================================
+    Properties and Classpaths Section
+    ================================================================================
+    -->
 
-  <condition property="staging">
-    <equals arg1="${repo}" arg2="staging"/>
-  </condition>
+    <condition property="staging">
+        <equals arg1="${repo}" arg2="staging"/>
+    </condition>
 
-  <!-- e2e test properties -->
-  <property name="test.e2e.dir" value="${basedir}/src/test/e2e/hcatalog"/>
+    <!-- e2e test properties -->
+    <property name="test.e2e.dir" value="${basedir}/src/test/e2e/hcatalog"/>
 
-  <!-- ivy properties set here -->
-  <property name="ivy.repo.dir" value="${user.home}/ivyrepo" />
-  <property name="ivy.dir" location="ivy" />
-  <loadproperties srcfile="${ivy.dir}/libraries.properties"/>
-  <property name="asfrepo" value="https://repository.apache.org"/>
-  <property name="asfsnapshotrepo" value="${asfrepo}/content/repositories/snapshots"/>
-  <property name="mvnrepo" value="http://repo2.maven.org/maven2"/>
-  <property name="asfstagingrepo" value="${asfrepo}/service/local/staging/deploy/maven2"/>
-  <property name="ivy.jar" location="${ivy.dir}/ivy-${ivy.version}.jar"/>
-  <property name="ant_task.jar" location="${ivy.dir}/maven-ant-tasks-${ant-task.version}.jar"/>
-  <property name="ant_task_repo_url"
-    value="${mvnrepo}/org/apache/maven/maven-ant-tasks/${ant-task.version}/maven-ant-tasks-${ant-task.version}.jar"/>
-  <property name="ivy_repo_url" value="${mvnrepo}/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar"/>
-  <property name="ivy.xml" location="${basedir}/ivy.xml"/>
-  <property name="ivysettings.xml" location="${ivy.dir}/ivysettings.xml" />
-  <property name="build.ivy.dir" location="${build.dir}/ivy" />
-  <property name="pom.file" location="${build.ivy.dir}/${ant.project.name}-${hcatalog.version}.pom"/>
-  <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib" />
-  <property name="ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}"/>
-  <property name="build.ivy.report.dir" location="${build.ivy.dir}/report" />
+    <!-- ivy properties set here -->
+    <property name="ivy.repo.dir" value="${user.home}/ivyrepo"/>
+    <property name="ivy.dir" location="ivy"/>
+    <loadproperties srcfile="${ivy.dir}/libraries.properties"/>
+    <property name="asfrepo" value="https://repository.apache.org"/>
+    <property name="asfsnapshotrepo" value="${asfrepo}/content/repositories/snapshots"/>
+    <property name="mvnrepo" value="http://repo2.maven.org/maven2"/>
+    <property name="asfstagingrepo" value="${asfrepo}/service/local/staging/deploy/maven2"/>
+    <property name="ivy.jar" location="${ivy.dir}/ivy-${ivy.version}.jar"/>
+    <property name="ant_task.jar" location="${ivy.dir}/maven-ant-tasks-${ant-task.version}.jar"/>
+    <property name="ant_task_repo_url"
+              value="${mvnrepo}/org/apache/maven/maven-ant-tasks/${ant-task.version}/maven-ant-tasks-${ant-task.version}.jar"/>
+    <property name="ivy_repo_url" value="${mvnrepo}/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar"/>
+    <property name="ivy.xml" location="${basedir}/ivy.xml"/>
+    <property name="ivysettings.xml" location="${ivy.dir}/ivysettings.xml"/>
+    <property name="build.ivy.dir" location="${build.dir}/ivy"/>
+    <property name="pom.file" location="${build.ivy.dir}/${ant.project.name}-${hcatalog.version}.pom"/>
+    <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib"/>
+    <property name="ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}"/>
+    <property name="build.ivy.report.dir" location="${build.ivy.dir}/report"/>
 
-  <!-- packaging properties -->
-  <property name="package.prefix" value="/usr"/>
-  <property name="package.conf.dir" value="/etc/hcatalog"/>
-  <property name="package.log.dir" value="/var/log/hcatalog"/>
-  <property name="package.pid.dir" value="/var/run/hcatalog"/>
-  <property name="package.var.dir" value="/var/lib/hcatalog"/>
-  <property name="package.share.dir" value="/share/hcatalog/${module}"/>
-  <property name="package.buildroot" value="${build.dir}/rpm/hcatalog_package_build_${user.name}"/>
-  <property name="package.build.dir" value="${build.dir}/rpm/hcatalog_package_build_${user.name}/BUILD"/>
+    <!-- packaging properties -->
+    <property name="package.prefix" value="/usr"/>
+    <property name="package.conf.dir" value="/etc/hcatalog"/>
+    <property name="package.log.dir" value="/var/log/hcatalog"/>
+    <property name="package.pid.dir" value="/var/run/hcatalog"/>
+    <property name="package.var.dir" value="/var/lib/hcatalog"/>
+    <property name="package.share.dir" value="/share/hcatalog/${module}"/>
+    <property name="package.buildroot" value="${build.dir}/rpm/hcatalog_package_build_${user.name}"/>
+    <property name="package.build.dir" value="${build.dir}/rpm/hcatalog_package_build_${user.name}/BUILD"/>
 
-  <!-- rats properties -->
-  <property name="rat.reporting.classname" value="rat.Report"/>
+    <!-- rats properties -->
+    <property name="rat.reporting.classname" value="rat.Report"/>
 
-  <path id="compile.classpath">
-    <fileset dir="${build.dir}/ivy/lib/default">
-      <include name="**/*.jar"/>
-    </fileset>
-  </path>
+    <path id="compile.classpath">
+        <fileset dir="${build.dir}/ivy/lib/default">
+            <include name="**/*.jar"/>
+        </fileset>
+    </path>
 
-  <path id="test.class.path">
-    <pathelement location="${test.classes}"/>
-    <pathelement location="${build.classes}"/>
-    <pathelement location="conf"/>
-    <fileset dir="${build.dir}/ivy/lib/test">
-      <include name="**/*.jar"/>
-    </fileset>
-    <pathelement location="${basedir}/hcatalog-pig-adapter/build/hcatalog-pig-adapter-${hcatalog.version}.jar"/>
-  </path>
+    <path id="test.class.path">
+        <pathelement location="${test.classes}"/>
+        <pathelement location="${build.classes}"/>
+        <pathelement location="conf"/>
+        <fileset dir="${build.dir}/ivy/lib/test">
+            <include name="**/*.jar"/>
+        </fileset>
+        <pathelement location="${basedir}/hcatalog-pig-adapter/build/hcatalog-pig-adapter-${hcatalog.version}.jar"/>
+    </path>
 
-  <!-- Classpath that includes all sub packages, used for things like Java docs -->
-  <path id="uber.classpath">
-    <fileset dir="${basedir}">
-      <include name="**/build/ivy/lib/default/*.jar"/>
-    </fileset>
-  </path>
+    <!-- Classpath that includes all sub packages, used for things like Java docs -->
+    <path id="uber.classpath">
+        <fileset dir="${basedir}">
+            <include name="**/build/ivy/lib/default/*.jar"/>
+        </fileset>
+    </path>
 
-  <!-- This is a little janky because hcatalog-core.jar is not yet a submodule. -->
-  <target name="ivy-report" depends="ivy-retrieve">
-    <antcall target="_ivy-report"/>
-    <ant target="_ivy-report" dir="hcatalog-pig-adapter" inheritAll="false" useNativeBasedir="true"/>
-    <ant target="ivy-report" dir="storage-handlers" inheritAll="false" useNativeBasedir="true"/>
-  </target>
+    <!-- This is a little janky because hcatalog-core.jar is not yet a submodule. -->
+    <target name="ivy-report" depends="ivy-retrieve">
+        <antcall target="_ivy-report"/>
+        <ant target="_ivy-report" dir="hcatalog-pig-adapter" inheritAll="false" useNativeBasedir="true"/>
+        <ant target="ivy-report" dir="storage-handlers" inheritAll="false" useNativeBasedir="true"/>
+    </target>
 
-  <target name="ivy-download" description="To download ivy" unless="offline">
-    <echo message="${ant.project.name}"/>
-    <get src="${ivy_repo_url}" dest="${ivy.jar}" usetimestamp="true" skipexisting="true"/>
-    <typedef uri="antlib:org.apache.ivy.ant" onerror="fail" loaderRef="ivyLoader">
-      <classpath>
-        <pathelement location="${ivy.jar}"/>
-      </classpath>
-    </typedef>
-  </target>
+    <target name="ivy-download" description="To download ivy" unless="offline">
+        <echo message="${ant.project.name}"/>
+        <get src="${ivy_repo_url}" dest="${ivy.jar}" usetimestamp="true" skipexisting="true"/>
+        <typedef uri="antlib:org.apache.ivy.ant" onerror="fail" loaderRef="ivyLoader">
+            <classpath>
+                <pathelement location="${ivy.jar}"/>
+            </classpath>
+        </typedef>
+    </target>
 
-  <target name="ivy-init" depends="ivy-download,_ivy-init">
-    <!-- HCatalog started as a single source tree that produced multiple artifacts.
-    Currently its going through a transition to subprojects. During this transition
-    we jank together pom files for artifacts produced in the existing source tree until
-    they are correctly generated from per-subproject ivy.xml files. -->
-    <copy file="${pom.file}" tofile="${build.ivy.dir}/hcatalog-core-${hcatalog.version}.pom"
-          overwrite="true"/>
-    <replace file="${build.ivy.dir}/hcatalog-core-${hcatalog.version}.pom"
-             token="artifactId>hcatalog"
-             value="artifactId>hcatalog-core"/>
+    <target name="ivy-init" depends="ivy-download,_ivy-init">
+        <!-- HCatalog started as a single source tree that produced multiple artifacts.
+        Currently its going through a transition to subprojects. During this transition
+        we jank together pom files for artifacts produced in the existing source tree until
+        they are correctly generated from per-subproject ivy.xml files. -->
+        <copy file="${pom.file}" tofile="${build.ivy.dir}/hcatalog-core-${hcatalog.version}.pom"
+              overwrite="true"/>
+        <replace file="${build.ivy.dir}/hcatalog-core-${hcatalog.version}.pom"
+                 token="artifactId>hcatalog"
+                 value="artifactId>hcatalog-core"/>
 
-    <copy file="${pom.file}" overwrite="true"
-          tofile="${build.ivy.dir}/hcatalog-server-extensions-${hcatalog.version}.pom"/>
-    <replace file="${build.ivy.dir}/hcatalog-server-extensions-${hcatalog.version}.pom"
-             token="artifactId>hcatalog"
-             value="artifactId>hcatalog-server-extensions"/>
-  </target>
+        <copy file="${pom.file}" overwrite="true"
+              tofile="${build.ivy.dir}/hcatalog-server-extensions-${hcatalog.version}.pom"/>
+        <replace file="${build.ivy.dir}/hcatalog-server-extensions-${hcatalog.version}.pom"
+                 token="artifactId>hcatalog"
+                 value="artifactId>hcatalog-server-extensions"/>
+    </target>
 
-  <target name="init" depends="ivy-retrieve,mvn-init">
-    <mkdir dir="${dist.dir}" />
-    <mkdir dir="${build.classes}" />
-    <mkdir dir="${test.classes}" />
-  </target>
+    <target name="init" depends="ivy-retrieve,mvn-init">
+        <mkdir dir="${dist.dir}"/>
+        <mkdir dir="${build.classes}"/>
+        <mkdir dir="${test.classes}"/>
+    </target>
 
-  <!--
-  ================================================================================
-  Main Build and Jar Section
-  ================================================================================
-  -->
-  <!-- Build HCatalog src files -->
-  <target name="compile-src" depends="init">
-    <javac encoding="${build.encoding}" srcdir="${src.dir}" excludes="${excludes}"
-        includes="**/*.java" destdir="${build.classes}" debug="${javac.debug}"
-        optimize="${javac.optimize}" target="${javac.version}"
-        source="${javac.version}" deprecation="${javac.deprecation}"
-        includeantruntime="false">
-      <compilerarg line="${javac.args}"/>
-      <classpath refid="compile.classpath"/>
-    </javac>
-  </target>
- 
-  <!-- Build the hcatalog client jar -->
-  <target name="clientjar" depends="compile-src">
-    <jar jarfile="${build.dir}/${ant.project.name}/${hcatalog.core.jar}"
-         basedir="${build.classes}"/>
-    <artifact:install file="${build.dir}/${ant.project.name}/${hcatalog.core.jar}">
-      <artifact:pom file="${build.ivy.dir}/hcatalog-core-${hcatalog.version}.pom"/>
-    </artifact:install>
-  </target>
+    <!--
+    ================================================================================
+    Main Build and Jar Section
+    ================================================================================
+    -->
+    <!-- Build HCatalog src files -->
+    <target name="compile-src" depends="init">
+        <javac encoding="${build.encoding}" srcdir="${src.dir}" excludes="${excludes}"
+               includes="**/*.java" destdir="${build.classes}" debug="${javac.debug}"
+               optimize="${javac.optimize}" target="${javac.version}"
+               source="${javac.version}" deprecation="${javac.deprecation}"
+               includeantruntime="false">
+            <compilerarg line="${javac.args}"/>
+            <classpath refid="compile.classpath"/>
+        </javac>
+    </target>
 
-  <!--
-  ================================================================================
-  Build server side code, mainly listener.
-  ================================================================================
-  -->
-  <target name="server-extensions" depends="compile-src">
-    <jar jarfile="${build.dir}/${ant.project.name}/${ant.project.name}-server-extensions-${hcatalog.version}.jar"
-         basedir="${build.classes}"
-         includes="org/apache/hcatalog/listener/**,org/apache/hcatalog/metadata/**"/>
-    <artifact:install file="${build.dir}/${ant.project.name}/${ant.project.name}-server-extensions-${hcatalog.version}.jar">
-      <artifact:pom file="${build.ivy.dir}/hcatalog-server-extensions-${hcatalog.version}.pom"/>
-    </artifact:install>
-  </target>
+    <!-- Build the hcatalog client jar -->
+    <target name="clientjar" depends="compile-src">
+        <jar jarfile="${build.dir}/${ant.project.name}/${hcatalog.core.jar}"
+             basedir="${build.classes}"/>
+        <artifact:install file="${build.dir}/${ant.project.name}/${hcatalog.core.jar}">
+            <artifact:pom file="${build.ivy.dir}/hcatalog-core-${hcatalog.version}.pom"/>
+        </artifact:install>
+    </target>
 
-  <!--
-  ================================================================================
-  Build both clientjar and server-extensions
-  ================================================================================
-  -->
-  <target name="jar" depends="checkstyle,shims,clientjar,server-extensions,jar-storage-handlers">
-    <ant target="jar" dir="hcatalog-pig-adapter" inheritAll="false"/>
-    <ant target="jar" dir="webhcat/svr" inheritAll="false"/>
-    <ant target="jar" dir="webhcat/java-client" inheritAll="false"/>
+    <!--
+    ================================================================================
+    Build server side code, mainly listener.
+    ================================================================================
+    -->
+    <target name="server-extensions" depends="compile-src">
+        <jar jarfile="${build.dir}/${ant.project.name}/${ant.project.name}-server-extensions-${hcatalog.version}.jar"
+             basedir="${build.classes}"
+             includes="org/apache/hcatalog/listener/**,org/apache/hcatalog/metadata/**"/>
+        <artifact:install
+                file="${build.dir}/${ant.project.name}/${ant.project.name}-server-extensions-${hcatalog.version}.jar">
+            <artifact:pom file="${build.ivy.dir}/hcatalog-server-extensions-${hcatalog.version}.pom"/>
+        </artifact:install>
+    </target>
 
-    <!-- Build hcatalog.jar, bundling the pig adapter. Our intention is to stop producing this
-         fat jar after some migration period. -->
-    <jar jarfile="${build.dir}/${ant.project.name}/${hcatalog.jar}">
-      <zipfileset src="${build.dir}/${ant.project.name}/${hcatalog.core.jar}"/>
-      <zipfileset src="hcatalog-pig-adapter/build/hcatalog-pig-adapter-${hcatalog.version}.jar"/>
-    </jar>
-  </target>
+    <!--
+    ================================================================================
+    Build both clientjar and server-extensions
+    ================================================================================
+    -->
+    <target name="jar" depends="checkstyle,shims,clientjar,server-extensions,jar-storage-handlers">
+        <ant target="jar" dir="hcatalog-pig-adapter" inheritAll="false"/>
+        <ant target="jar" dir="webhcat/svr" inheritAll="false"/>
+        <ant target="jar" dir="webhcat/java-client" inheritAll="false"/>
 
-  <!--
-  ================================================================================
-  Build shims
-  ================================================================================
-  -->
+        <!-- Build hcatalog.jar, bundling the pig adapter. Our intention is to stop producing this
+             fat jar after some migration period. -->
+        <jar jarfile="${build.dir}/${ant.project.name}/${hcatalog.jar}">
+            <zipfileset src="${build.dir}/${ant.project.name}/${hcatalog.core.jar}"/>
+            <zipfileset src="hcatalog-pig-adapter/build/hcatalog-pig-adapter-${hcatalog.version}.jar"/>
+        </jar>
+    </target>
 
-  <target name="shims" depends="compile-src">
-    <ant antfile="shims/build.xml" target="jar" inheritAll="false" useNativeBasedir="true"/>
-  </target>
+    <!--
+    ================================================================================
+    Build shims
+    ================================================================================
+    -->
 
-  <!--
-  ================================================================================
-  Build storage handlers
-  ================================================================================
-  -->
+    <target name="shims" depends="compile-src">
+        <ant antfile="shims/build.xml" target="jar" inheritAll="false" useNativeBasedir="true"/>
+    </target>
 
-  <target name="jar-storage-handlers">        
-    <ant target="jar" dir="storage-handlers" inheritAll="false" useNativeBasedir="true"/>
-  </target>
+    <!--
+    ================================================================================
+    Build storage handlers
+    ================================================================================
+    -->
 
-  <!--
-  ================================================================================
-  Test Section
-  ================================================================================
-  -->
-  <!-- Build HCatalog test files -->
-  <target name="compile-test" depends="jar">
-    <javac
-        encoding="${build.encoding}"
-        srcdir="${test.src.dir}"
-        excludes="${test.excludes}"
-        includes="**/*.java"
-        destdir="${test.classes}"
-        debug="${javac.debug}"
-        optimize="${javac.optimize}"
-        target="${javac.version}"
-        source="${javac.version}"
-        deprecation="${javac.deprecation}"
-        includeantruntime="false">
-      <compilerarg line="${javac.args}"/>
-      <classpath refid="test.class.path"/>
-    </javac>
-  </target>
+    <target name="jar-storage-handlers">
+        <ant target="jar" dir="storage-handlers" inheritAll="false" useNativeBasedir="true"/>
+    </target>
 
-  <target name="test" depends="compile-test" description="run unit tests">
-    <_junit srcDir="${basedir}/src/test"/>
-    <ant target="test" dir="hcatalog-pig-adapter" inheritAll="false"/>
-    <ant target="test" dir="webhcat/svr" inheritAll="false"/>
-    <ant target="test" dir="webhcat/java-client" inheritAll="false"/>
-    <ant target="test" dir="storage-handlers" inheritAll="false" useNativeBasedir="true"/>
-  </target>
+    <!--
+    ================================================================================
+    Test Section
+    ================================================================================
+    -->
+    <!-- Build HCatalog test files -->
+    <target name="compile-test" depends="jar">
+        <javac
+                encoding="${build.encoding}"
+                srcdir="${test.src.dir}"
+                excludes="${test.excludes}"
+                includes="**/*.java"
+                destdir="${test.classes}"
+                debug="${javac.debug}"
+                optimize="${javac.optimize}"
+                target="${javac.version}"
+                source="${javac.version}"
+                deprecation="${javac.deprecation}"
+                includeantruntime="false">
+            <compilerarg line="${javac.args}"/>
+            <classpath refid="test.class.path"/>
+        </javac>
+    </target>
 
-  <target name="test-with-clover" depends="clover-init, compile-test"
-          description="run unit tests and generate code coverage reports">
-    <_junit srcDir="${basedir}/src/test"/>
-    <ant target="_test-with-clover" dir="hcatalog-pig-adapter" inheritAll="false"/>
-    <ant target="_test-with-clover" dir="webhcat/svr" inheritAll="false"/>
-    <ant target="_test-with-clover" dir="webhcat/java-client" inheritAll="false"/>
-    <!-- storage-handlers do not have coverage as they have not
-         yet been migrated to the new build files. -->
-    <ant target="test" dir="storage-handlers" inheritAll="false" useNativeBasedir="true"/>
-  </target>
+    <target name="test" depends="compile-test" description="run unit tests">
+        <_junit srcDir="${basedir}/src/test"/>
+        <ant target="test" dir="hcatalog-pig-adapter" inheritAll="false"/>
+        <ant target="test" dir="webhcat/svr" inheritAll="false"/>
+        <ant target="test" dir="webhcat/java-client" inheritAll="false"/>
+        <ant target="test" dir="storage-handlers" inheritAll="false" useNativeBasedir="true"/>
+    </target>
+
+    <target name="test-with-clover" depends="clover-init, compile-test"
+            description="run unit tests and generate code coverage reports">
+        <_junit srcDir="${basedir}/src/test"/>
+        <ant target="_test-with-clover" dir="hcatalog-pig-adapter" inheritAll="false"/>
+        <ant target="_test-with-clover" dir="webhcat/svr" inheritAll="false"/>
+        <ant target="_test-with-clover" dir="webhcat/java-client" inheritAll="false"/>
+        <!-- storage-handlers do not have coverage as they have not
+             yet been migrated to the new build files. -->
+        <ant target="test" dir="storage-handlers" inheritAll="false" useNativeBasedir="true"/>
+    </target>
 
 
-  <!--
-  ================================================================================
-  Findbugs Section
-  ================================================================================
-  -->
+    <!--
+    ================================================================================
+    Findbugs Section
+    ================================================================================
+    -->
 
-  <target name="findbugs" depends="init-findbugs,jar">
-    <property name="findbugs.out.dir" value="${test.dir}/findbugs"/>
-    <property name="findbugs.exclude.file" value="${test.src.dir}/findbugsExcludeFile.xml"/>
-    <property name="findbugs.report.htmlfile"
-      value="${findbugs.out.dir}/hcat-findbugs-report.html"/>
-    <property name="findbugs.report.xmlfile"
-      value="${findbugs.out.dir}/hcat-findbugs-report.xml"/>
+    <target name="findbugs" depends="init-findbugs,jar">
+        <property name="findbugs.out.dir" value="${test.dir}/findbugs"/>
+        <property name="findbugs.exclude.file" value="${test.src.dir}/findbugsExcludeFile.xml"/>
+        <property name="findbugs.report.htmlfile"
+                  value="${findbugs.out.dir}/hcat-findbugs-report.html"/>
+        <property name="findbugs.report.xmlfile"
+                  value="${findbugs.out.dir}/hcat-findbugs-report.xml"/>
 
-    <_findbugs outputDir="${findbugs.out.dir}"
-               outputFile="${findbugs.report.xmlfile}"
-               excludeFilter="${findbugs.exclude.file}"
-               findbugsReportXmlFile="${findbugs.report.xmlfile}"
-               findbugsReportHtmlFile="${findbugs.report.htmlfile}"
-               sourceDir="${src.dir}"
-               jarDir="${build.dir}/${ant.project.name}"
-               classPathRef="compile.classpath"/>
+        <_findbugs outputDir="${findbugs.out.dir}"
+                   outputFile="${findbugs.report.xmlfile}"
+                   excludeFilter="${findbugs.exclude.file}"
+                   findbugsReportXmlFile="${findbugs.report.xmlfile}"
+                   findbugsReportHtmlFile="${findbugs.report.htmlfile}"
+                   sourceDir="${src.dir}"
+                   jarDir="${build.dir}/${ant.project.name}"
+                   classPathRef="compile.classpath"/>
 
-    <ant target="findbugs" dir="hcatalog-pig-adapter" inheritAll="false"/>
-    <ant target="findbugs" dir="webhcat/svr" inheritAll="false"/>
-    <ant target="findbugs" dir="webhcat/java-client" inheritAll="false"/>
-  </target>
+        <ant target="findbugs" dir="hcatalog-pig-adapter" inheritAll="false"/>
+        <ant target="findbugs" dir="webhcat/svr" inheritAll="false"/>
+        <ant target="findbugs" dir="webhcat/java-client" inheritAll="false"/>
+    </target>
 
-  <!--
-  ================================================================================
-  Clean Section
-  ================================================================================
-  -->
-  <!-- Clean up children -->
-  <target name="clean" description="Cleanup all build artifacts">
-    <echo message="${ant.project.name}"/>
-    <delete dir="${build.dir}" />
-    <delete dir="${test.warehouse.dir}"/>
-    <ant target="clean" dir="hcatalog-pig-adapter" inheritAll="false"/>
-    <ant target="clean" dir="webhcat/svr" inheritAll="false"/>
-    <ant target="clean" dir="webhcat/java-client" inheritAll="false"/>
-    <ant target="clean" dir="storage-handlers" inheritAll="false" useNativeBasedir="true"/>
-    <ant target="clean" dir="shims" inheritAll="false" useNativeBasedir="true"/>
-  </target>
- 
-  <!--
-  ================================================================================
-  Docs Section
-  ================================================================================
-  -->
-  <target name="docs" depends="forrest, javadoc"
-          description="Generate Javadoc and Forrest documentation">
-  </target>
+    <!--
+    ================================================================================
+    Clean Section
+    ================================================================================
+    -->
+    <!-- Clean up children -->
+    <target name="clean" description="Cleanup all build artifacts">
+        <echo message="${ant.project.name}"/>
+        <delete dir="${build.dir}"/>
+        <delete dir="${test.warehouse.dir}"/>
+        <ant target="clean" dir="hcatalog-pig-adapter" inheritAll="false"/>
+        <ant target="clean" dir="webhcat/svr" inheritAll="false"/>
+        <ant target="clean" dir="webhcat/java-client" inheritAll="false"/>
+        <ant target="clean" dir="storage-handlers" inheritAll="false" useNativeBasedir="true"/>
+        <ant target="clean" dir="shims" inheritAll="false" useNativeBasedir="true"/>
+    </target>
 
-  <target name="forrest" if="forrest.home"
-          description="Generate forrest-based documentation. To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line.">
-    <exec dir="${docs.src}" executable="${forrest.home}/bin/forrest"
-          failonerror="true">
-    </exec>
-    <copy todir="${build.docs}/">
-      <fileset dir="${docs.src}/build/site/" />
-    </copy>
-  </target>
+    <!--
+    ================================================================================
+    Docs Section
+    ================================================================================
+    -->
+    <target name="docs" depends="forrest, javadoc"
+            description="Generate Javadoc and Forrest documentation">
+    </target>
 
-  <target name="javadoc" depends="jar" description="Generate Javadoc documentation">
-    <mkdir dir="${build.javadoc}" />
-    <javadoc overview="${src.dir}/../docs/overview.html"
-             packagenames="org.apache.hcatalog.*"
-             destdir="${build.javadoc}"
-             author="true"
-             version="true"
-             use="true"
-             noqualifier="all"
-             windowtitle="HCatalog ${hcatalog.version} API"
-             doctitle="HCatalog ${hcatalog.version} API"
-             failonerror="true">
-      <packageset dir="${src.dir}" />
-      <packageset dir="hcatalog-pig-adapter/src/main/java" />
-      <packageset dir="webhcat/svr/src/main/java" />
-      <packageset dir="webhcat/java-client/src/main/java" />
-      <classpath>
-        <path refid="uber.classpath"/>
-      </classpath>
-      <group title="hcatalog" packages="org.apache.hcatalog.*"/>
-    </javadoc>
-  </target>
+    <target name="forrest" if="forrest.home"
+            description="Generate forrest-based documentation. To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line.">
+        <exec dir="${docs.src}" executable="${forrest.home}/bin/forrest"
+              failonerror="true">
+        </exec>
+        <copy todir="${build.docs}/">
+            <fileset dir="${docs.src}/build/site/"/>
+        </copy>
+    </target>
 
-  <!--
-  ===============================================================================
-  Distribution Section
-  ===============================================================================
-  -->
-  <target name="package-storage-handlers">
-    <property name="handlers.dir" value="${dist.dir}/share/hcatalog/storage-handlers"/>
-    <mkdir dir="${handlers.dir}"/>
-    <ant target="package" dir="storage-handlers" inheritAll="false" useNativeBasedir="true">     
-        <property name="dist.handlers.dir" value="${handlers.dir}"/>
-    </ant>
-    <copy todir="${dist.dir}/share/${ant.project.name}/lib" includeEmptyDirs="false" flatten="true">
-      <fileset dir="${dist.dir}/share/${ant.project.name}/storage-handlers">
-        <include name="*/lib/*"/>
-      </fileset>
-    </copy>
-  </target>
+    <target name="javadoc" depends="jar" description="Generate Javadoc documentation">
+        <mkdir dir="${build.javadoc}"/>
+        <javadoc overview="${src.dir}/../docs/overview.html"
+                 packagenames="org.apache.hcatalog.*"
+                 destdir="${build.javadoc}"
+                 author="true"
+                 version="true"
+                 use="true"
+                 noqualifier="all"
+                 windowtitle="HCatalog ${hcatalog.version} API"
+                 doctitle="HCatalog ${hcatalog.version} API"
+                 failonerror="true">
+            <packageset dir="${src.dir}"/>
+            <packageset dir="hcatalog-pig-adapter/src/main/java"/>
+            <packageset dir="webhcat/svr/src/main/java"/>
+            <packageset dir="webhcat/java-client/src/main/java"/>
+            <classpath>
+                <path refid="uber.classpath"/>
+            </classpath>
+            <group title="hcatalog" packages="org.apache.hcatalog.*"/>
+        </javadoc>
+    </target>
 
-  <target name="package" depends="jar, docs" description="Create an HCatalog release">
-    <mkdir dir="${dist.dir}" />
-    <mkdir dir="${dist.dir}/share/${ant.project.name}/lib" />
-    <mkdir dir="${dist.dir}/etc/hcatalog" />
-    <mkdir dir="${dist.dir}/bin" />
-    <mkdir dir="${dist.dir}/sbin" />
-    <mkdir dir="${dist.dir}/share/${ant.project.name}/scripts" />
-    <mkdir dir="${dist.dir}/share/${ant.project.name}/templates/conf" />
-    <mkdir dir="${dist.dir}/share/doc/${ant.project.name}" />
-    <mkdir dir="${dist.dir}/share/doc/${ant.project.name}/api" />
-    <mkdir dir="${dist.dir}/share/doc/${ant.project.name}/jdiff"/>
-    <mkdir dir="${dist.dir}/share/doc/${ant.project.name}/license" />
+    <!--
+    ===============================================================================
+    Distribution Section
+    ===============================================================================
+    -->
+    <target name="package-storage-handlers">
+        <property name="handlers.dir" value="${dist.dir}/share/hcatalog/storage-handlers"/>
+        <mkdir dir="${handlers.dir}"/>
+        <ant target="package" dir="storage-handlers" inheritAll="false" useNativeBasedir="true">
+            <property name="dist.handlers.dir" value="${handlers.dir}"/>
+        </ant>
+        <copy todir="${dist.dir}/share/${ant.project.name}/lib" includeEmptyDirs="false" flatten="true">
+            <fileset dir="${dist.dir}/share/${ant.project.name}/storage-handlers">
+                <include name="*/lib/*"/>
+            </fileset>
+        </copy>
+    </target>
 
-    <copy todir="${dist.dir}/share/${ant.project.name}" includeEmptyDirs="false">
-      <fileset dir="${build.dir}/${ant.project.name}/">
-        <include name="hcatalog-*.jar"/>
-      </fileset>
-      <fileset dir="hcatalog-pig-adapter/build">
-        <include name="hcatalog-*.jar"/>
-      </fileset>
-      <fileset dir="webhcat/svr/build">
-        <include name="webhcat-*.jar"/>
-      </fileset>
-      <fileset dir="webhcat/java-client/build">
-        <include name="webhcat-java-client*.jar"/>
-      </fileset>
-    </copy>
+    <target name="package" depends="jar, docs" description="Create an HCatalog release">
+        <mkdir dir="${dist.dir}"/>
+        <mkdir dir="${dist.dir}/share/${ant.project.name}/lib"/>
+        <mkdir dir="${dist.dir}/etc/hcatalog"/>
+        <mkdir dir="${dist.dir}/bin"/>
+        <mkdir dir="${dist.dir}/sbin"/>
+        <mkdir dir="${dist.dir}/share/${ant.project.name}/scripts"/>
+        <mkdir dir="${dist.dir}/share/${ant.project.name}/templates/conf"/>
+        <mkdir dir="${dist.dir}/share/doc/${ant.project.name}"/>
+        <mkdir dir="${dist.dir}/share/doc/${ant.project.name}/api"/>
+        <mkdir dir="${dist.dir}/share/doc/${ant.project.name}/jdiff"/>
+        <mkdir dir="${dist.dir}/share/doc/${ant.project.name}/license"/>
 
-    <copy todir="${dist.dir}/bin">
-      <fileset dir="bin/">
-        <include name="hcat"/>
-      </fileset>
+        <copy todir="${dist.dir}/share/${ant.project.name}" includeEmptyDirs="false">
+            <fileset dir="${build.dir}/${ant.project.name}/">
+                <include name="hcatalog-*.jar"/>
+            </fileset>
+            <fileset dir="hcatalog-pig-adapter/build">
+                <include name="hcatalog-*.jar"/>
+            </fileset>
+            <fileset dir="webhcat/svr/build">
+                <include name="webhcat-*.jar"/>
+            </fileset>
+            <fileset dir="webhcat/java-client/build">
+                <include name="webhcat-java-client*.jar"/>
+            </fileset>
+        </copy>
 
-    </copy>
-    <copy todir="${dist.dir}/libexec">
-      <fileset dir="bin">
-        <include name="hcat-config.sh"/>
-      </fileset>
-    </copy>
+        <copy todir="${dist.dir}/bin">
+            <fileset dir="bin/">
+                <include name="hcat"/>
+            </fileset>
 
-    <copy todir="${dist.dir}/sbin">
-      <fileset dir="${package.dir}">
-        <include name="*.sh"/>
-      </fileset>
-    </copy>
+        </copy>
+        <copy todir="${dist.dir}/libexec">
+            <fileset dir="bin">
+                <include name="hcat-config.sh"/>
+            </fileset>
+        </copy>
 
-    <copy todir="${dist.dir}/etc/${ant.project.name}">
-      <fileset dir="conf" />
-    </copy>
+        <copy todir="${dist.dir}/sbin">
+            <fileset dir="${package.dir}">
+                <include name="*.sh"/>
+            </fileset>
+        </copy>
 
-    <copy todir="${dist.dir}/share/${ant.project.name}/scripts">
-       <fileset dir="scripts">
-         <include name="*.sh"/>
-       </fileset>
-    </copy>
+        <copy todir="${dist.dir}/etc/${ant.project.name}">
+            <fileset dir="conf"/>
+        </copy>
 
-    <copy todir="${dist.dir}/share/${ant.project.name}/templates/conf">
-      <fileset dir="src/packages/templates/conf">
-        <include name="*"/>
-      </fileset>
-    </copy>
+        <copy todir="${dist.dir}/share/${ant.project.name}/scripts">
+            <fileset dir="scripts">
+                <include name="*.sh"/>
+            </fileset>
+        </copy>
 
-    <copy todir="${dist.dir}/sbin">
-      <fileset dir="${package.dir}">
-        <include name="*.sh"/>
-      </fileset>
-      <fileset dir="webhcat/svr/src/main/bin">
-        <include name="*.sh"/>
-      </fileset>
-      <fileset dir="bin">
-        <include name="hcat_server.sh"/>
-      </fileset>
-    </copy>
+        <copy todir="${dist.dir}/share/${ant.project.name}/templates/conf">
+            <fileset dir="src/packages/templates/conf">
+                <include name="*"/>
+            </fileset>
+        </copy>
 
-	<!-- Copy the licenses and such -->
-    <copy todir="${dist.dir}/share/doc/${ant.project.name}">
-      <fileset dir=".">
-        <include name="*.txt" />
-      </fileset>
-    </copy>
+        <copy todir="${dist.dir}/sbin">
+            <fileset dir="${package.dir}">
+                <include name="*.sh"/>
+            </fileset>
+            <fileset dir="webhcat/svr/src/main/bin">
+                <include name="*.sh"/>
+            </fileset>
+            <fileset dir="bin">
+                <include name="hcat_server.sh"/>
+            </fileset>
+        </copy>
 
-    <copy todir="${dist.dir}/share/doc/${ant.project.name}/license">
-      <fileset dir="license" />
-    </copy>
+        <!-- Copy the licenses and such -->
+        <copy todir="${dist.dir}/share/doc/${ant.project.name}">
+            <fileset dir=".">
+                <include name="*.txt"/>
+            </fileset>
+        </copy>
 
-    <chmod perm="ugo+x" type="file">
-      <fileset dir="${dist.dir}/bin" />
-      <fileset dir="${dist.dir}/sbin" />
-    </chmod>
-       <!--package storage-handlers -->
-      <antcall target="package-storage-handlers"/>
+        <copy todir="${dist.dir}/share/doc/${ant.project.name}/license">
+            <fileset dir="license"/>
+        </copy>
+
+        <chmod perm="ugo+x" type="file">
+            <fileset dir="${dist.dir}/bin"/>
+            <fileset dir="${dist.dir}/sbin"/>
+        </chmod>
+        <!--package storage-handlers -->
+        <antcall target="package-storage-handlers"/>
     </target>
 
     <target name="releaseaudit" depends="ivy-retrieve" description="Release Audit activities">
-      <java classname="${rat.reporting.classname}" fork="true">
-        <classpath refid="releaseaudit.classpath"/>
-        <arg value="${basedir}/src"/>
-      </java>
+        <java classname="${rat.reporting.classname}" fork="true">
+            <classpath refid="releaseaudit.classpath"/>
+            <arg value="${basedir}/src"/>
+        </java>
     </target>
 
     <!-- ================================================================== -->
@@ -473,7 +474,8 @@
     <!-- ================================================================== -->
     <target name="src-release" depends="clean" description="Source distribution">
         <mkdir dir="${build.dir}"/>
-        <tar compression="gzip" longfile="gnu" destfile="${build.dir}/${ant.project.name}-src-${hcatalog.version}.tar.gz">
+        <tar compression="gzip" longfile="gnu"
+             destfile="${build.dir}/${ant.project.name}-src-${hcatalog.version}.tar.gz">
             <tarfileset dir="${basedir}" mode="644" prefix="${ant.project.name}-src-${hcatalog.version}">
                 <include name="conf/**"/>
                 <include name="hcatalog-pig-adapter/**"/>
@@ -493,7 +495,7 @@
                 <include name="bin/**"/>
             </tarfileset>
         </tar>
-    </target> 
+    </target>
 
     <!-- ================================================================== -->
     <!-- Make release binary packages                                       -->
@@ -501,15 +503,15 @@
     <target name="tar" depends="package" description="Create release tarball">
         <tar compression="gzip" longfile="gnu" destfile="${build.dir}/${final.name}.tar.gz">
             <tarfileset dir="${build.dir}" mode="664">
-                <include name="${final.name}/**" />
-                <exclude name="${final.name}/bin/*" />
-                <exclude name="${final.name}/sbin/*" />
-                <exclude name="${final.name}/share/hcatalog/scripts/*" />
+                <include name="${final.name}/**"/>
+                <exclude name="${final.name}/bin/*"/>
+                <exclude name="${final.name}/sbin/*"/>
+                <exclude name="${final.name}/share/hcatalog/scripts/*"/>
             </tarfileset>
             <tarfileset dir="${build.dir}" mode="755">
-                <include name="${final.name}/bin/*" />
-                <include name="${final.name}/sbin/*" />
-                <include name="${final.name}/share/hcatalog/scripts/*" />
+                <include name="${final.name}/bin/*"/>
+                <include name="${final.name}/sbin/*"/>
+                <include name="${final.name}/share/hcatalog/scripts/*"/>
             </tarfileset>
         </tar>
     </target>
@@ -530,10 +532,10 @@
         <ant dir="${test.e2e.dir}" target="deploy"/>
     </target>
 
-  <import file="ant/checkstyle.xml"/>
-  <import file="ant/dependencies.xml"/>
-  <import file="ant/deploy.xml"/>
-  <import file="ant/findbugs.xml"/>
-  <import file="ant/test.xml"/>
+    <import file="ant/checkstyle.xml"/>
+    <import file="ant/dependencies.xml"/>
+    <import file="ant/deploy.xml"/>
+    <import file="ant/findbugs.xml"/>
+    <import file="ant/test.xml"/>
 
 </project>
diff --git a/coding_style.xml b/coding_style.xml
index 2ba8723..4af26a6 100644
--- a/coding_style.xml
+++ b/coding_style.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <!DOCTYPE module PUBLIC
-  "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
-  "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
+        "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
+        "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
 
 <!-- Checkstyle rules required by HCatalog.
 
@@ -12,28 +12,32 @@
 
 <module name="Checker">
 
-  <!-- Checks whether files end with a new line.                        -->
-  <!-- See http://checkstyle.sf.net/config_misc.html#NewlineAtEndOfFile -->
-  <module name="NewlineAtEndOfFile"/>
+    <!-- Checks whether files end with a new line.                        -->
+    <!-- See http://checkstyle.sf.net/config_misc.html#NewlineAtEndOfFile -->
+    <module name="NewlineAtEndOfFile"/>
 
-  <!-- Checks for Size Violations.                    -->
-  <!-- See http://checkstyle.sf.net/config_sizes.html -->
-  <module name="FileLength"/>
+    <!-- Checks for Size Violations.                    -->
+    <!-- See http://checkstyle.sf.net/config_sizes.html -->
+    <module name="FileLength"/>
 
-  <module name="Header">
-    <property name="headerFile" value="apache_header_java.txt"/>
-    <property name="fileExtensions" value="java"/>
-  </module>
+    <module name="Header">
+        <property name="headerFile" value="apache_header_java.txt"/>
+        <property name="fileExtensions" value="java"/>
+    </module>
 
-  <module name="TreeWalker">
+    <module name="TreeWalker">
 
-    <!-- Checks for imports                              -->
-    <!-- See http://checkstyle.sf.net/config_import.html -->
-    <module name="AvoidStarImport"/>
-    <module name="IllegalImport"/> <!-- defaults to sun.* packages -->
-    <module name="RedundantImport"/>
-    <module name="UnusedImports"/>
+        <!-- Checks for imports                              -->
+        <!-- See http://checkstyle.sf.net/config_import.html -->
+        <module name="AvoidStarImport"/>
+        <module name="IllegalImport"/> <!-- defaults to sun.* packages -->
+        <module name="RedundantImport"/>
+        <module name="UnusedImports"/>
 
-  </module>
+        <module name="Indentation">
+            <property name="caseIndent" value="0"/>
+        </module>
+
+    </module>
 
 </module>
diff --git a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatBaseLoader.java b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatBaseLoader.java
index 734b758..031afda 100644
--- a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatBaseLoader.java
+++ b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatBaseLoader.java
@@ -48,102 +48,102 @@
 
 public abstract class HCatBaseLoader extends LoadFunc implements LoadMetadata, LoadPushDown {
 
-  protected static final String PRUNE_PROJECTION_INFO = "prune.projection.info";
+    protected static final String PRUNE_PROJECTION_INFO = "prune.projection.info";
 
-  private RecordReader<?, ?> reader;
-  protected String signature;
+    private RecordReader<?, ?> reader;
+    protected String signature;
 
-  HCatSchema outputSchema = null;
+    HCatSchema outputSchema = null;
 
 
-  @Override
-  public Tuple getNext() throws IOException {
-    try {
-      HCatRecord hr =  (HCatRecord) (reader.nextKeyValue() ? reader.getCurrentValue() : null);
-      Tuple t = PigHCatUtil.transformToTuple(hr,outputSchema);
-      // TODO : we were discussing an iter interface, and also a LazyTuple
-      // change this when plans for that solidifies.
-      return t;
-    } catch (ExecException e) {
-      int errCode = 6018;
-      String errMsg = "Error while reading input";
-      throw new ExecException(errMsg, errCode,
-          PigException.REMOTE_ENVIRONMENT, e);
-    } catch (Exception eOther){
-      int errCode = 6018;
-      String errMsg = "Error converting read value to tuple";
-      throw new ExecException(errMsg, errCode,
-          PigException.REMOTE_ENVIRONMENT, eOther);
-    }
-
-  }
-
-  @Override
-  public void prepareToRead(RecordReader reader, PigSplit arg1) throws IOException {
-    this.reader = reader;
-  }
-
-  @Override
-  public ResourceStatistics getStatistics(String location, Job job) throws IOException {
-    // statistics not implemented currently
-    return null;
-  }
-
-  @Override
-  public List<OperatorSet> getFeatures() {
-    return Arrays.asList(LoadPushDown.OperatorSet.PROJECTION);
-  }
-
-  @Override
-  public RequiredFieldResponse pushProjection(RequiredFieldList requiredFieldsInfo) throws FrontendException {
-    // Store the required fields information in the UDFContext so that we
-    // can retrieve it later.
-    storeInUDFContext(signature, PRUNE_PROJECTION_INFO, requiredFieldsInfo);
-
-    // HCat will always prune columns based on what we ask of it - so the
-    // response is true
-    return new RequiredFieldResponse(true);
-  }
-
-  @Override
-  public void setUDFContextSignature(String signature) {
-    this.signature = signature;
-  }
-
-
-  // helper methods
-  protected void storeInUDFContext(String signature, String key, Object value) {
-    UDFContext udfContext = UDFContext.getUDFContext();
-    Properties props = udfContext.getUDFProperties(
-        this.getClass(), new String[] {signature});
-    props.put(key, value);
-  }
-
-  /**
-   * A utility method to get the size of inputs. This is accomplished by summing the
-   * size of all input paths on supported FileSystems. Locations whose size cannot be
-   * determined are ignored. Note non-FileSystem and unpartitioned locations will not
-   * report their input size by default.
-   */
-  protected static long getSizeInBytes(InputJobInfo inputJobInfo) throws IOException {
-    Configuration conf = new Configuration();
-    long sizeInBytes = 0;
-
-    for (PartInfo partInfo : inputJobInfo.getPartitions()) {
-      try {
-        Path p = new Path(partInfo.getLocation());
-        if (p.getFileSystem(conf).isFile(p)) {
-          sizeInBytes += p.getFileSystem(conf).getFileStatus(p).getLen();
-        } else {
-          for (FileStatus child : p.getFileSystem(conf).listStatus(p)) {
-            sizeInBytes += child.getLen();
-          }
+    @Override
+    public Tuple getNext() throws IOException {
+        try {
+            HCatRecord hr = (HCatRecord) (reader.nextKeyValue() ? reader.getCurrentValue() : null);
+            Tuple t = PigHCatUtil.transformToTuple(hr, outputSchema);
+            // TODO : we were discussing an iter interface, and also a LazyTuple
+            // change this when plans for that solidifies.
+            return t;
+        } catch (ExecException e) {
+            int errCode = 6018;
+            String errMsg = "Error while reading input";
+            throw new ExecException(errMsg, errCode,
+                PigException.REMOTE_ENVIRONMENT, e);
+        } catch (Exception eOther) {
+            int errCode = 6018;
+            String errMsg = "Error converting read value to tuple";
+            throw new ExecException(errMsg, errCode,
+                PigException.REMOTE_ENVIRONMENT, eOther);
         }
-      } catch (IOException e) {
-        // Report size to the extent possible.
-      }
+
     }
 
-    return sizeInBytes;
-  }
+    @Override
+    public void prepareToRead(RecordReader reader, PigSplit arg1) throws IOException {
+        this.reader = reader;
+    }
+
+    @Override
+    public ResourceStatistics getStatistics(String location, Job job) throws IOException {
+        // statistics not implemented currently
+        return null;
+    }
+
+    @Override
+    public List<OperatorSet> getFeatures() {
+        return Arrays.asList(LoadPushDown.OperatorSet.PROJECTION);
+    }
+
+    @Override
+    public RequiredFieldResponse pushProjection(RequiredFieldList requiredFieldsInfo) throws FrontendException {
+        // Store the required fields information in the UDFContext so that we
+        // can retrieve it later.
+        storeInUDFContext(signature, PRUNE_PROJECTION_INFO, requiredFieldsInfo);
+
+        // HCat will always prune columns based on what we ask of it - so the
+        // response is true
+        return new RequiredFieldResponse(true);
+    }
+
+    @Override
+    public void setUDFContextSignature(String signature) {
+        this.signature = signature;
+    }
+
+
+    // helper methods
+    protected void storeInUDFContext(String signature, String key, Object value) {
+        UDFContext udfContext = UDFContext.getUDFContext();
+        Properties props = udfContext.getUDFProperties(
+            this.getClass(), new String[]{signature});
+        props.put(key, value);
+    }
+
+    /**
+     * A utility method to get the size of inputs. This is accomplished by summing the
+     * size of all input paths on supported FileSystems. Locations whose size cannot be
+     * determined are ignored. Note non-FileSystem and unpartitioned locations will not
+     * report their input size by default.
+     */
+    protected static long getSizeInBytes(InputJobInfo inputJobInfo) throws IOException {
+        Configuration conf = new Configuration();
+        long sizeInBytes = 0;
+
+        for (PartInfo partInfo : inputJobInfo.getPartitions()) {
+            try {
+                Path p = new Path(partInfo.getLocation());
+                if (p.getFileSystem(conf).isFile(p)) {
+                    sizeInBytes += p.getFileSystem(conf).getFileStatus(p).getLen();
+                } else {
+                    for (FileStatus child : p.getFileSystem(conf).listStatus(p)) {
+                        sizeInBytes += child.getLen();
+                    }
+                }
+            } catch (IOException e) {
+                // Report size to the extent possible.
+            }
+        }
+
+        return sizeInBytes;
+    }
 }
diff --git a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatBaseStorer.java b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatBaseStorer.java
index 458c755..b3ba293 100644
--- a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatBaseStorer.java
+++ b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatBaseStorer.java
@@ -60,382 +60,382 @@
 
 public abstract class HCatBaseStorer extends StoreFunc implements StoreMetadata {
 
-  private static final List<Type> SUPPORTED_INTEGER_CONVERSIONS =
-      Lists.newArrayList(Type.TINYINT, Type.SMALLINT, Type.INT);
-  protected static final String COMPUTED_OUTPUT_SCHEMA = "hcat.output.schema";
-  protected final List<String> partitionKeys;
-  protected final Map<String,String> partitions;
-  protected Schema pigSchema;
-  private RecordWriter<WritableComparable<?>, HCatRecord> writer;
-  protected HCatSchema computedSchema;
-  protected static final String PIG_SCHEMA = "hcat.pig.store.schema";
-  protected String sign;
+    private static final List<Type> SUPPORTED_INTEGER_CONVERSIONS =
+        Lists.newArrayList(Type.TINYINT, Type.SMALLINT, Type.INT);
+    protected static final String COMPUTED_OUTPUT_SCHEMA = "hcat.output.schema";
+    protected final List<String> partitionKeys;
+    protected final Map<String, String> partitions;
+    protected Schema pigSchema;
+    private RecordWriter<WritableComparable<?>, HCatRecord> writer;
+    protected HCatSchema computedSchema;
+    protected static final String PIG_SCHEMA = "hcat.pig.store.schema";
+    protected String sign;
 
-  public HCatBaseStorer(String partSpecs, String schema) throws Exception {
+    public HCatBaseStorer(String partSpecs, String schema) throws Exception {
 
-    partitionKeys = new ArrayList<String>();
-    partitions = new HashMap<String, String>();
-    if(partSpecs != null && !partSpecs.trim().isEmpty()){
-      String[] partKVPs = partSpecs.split(",");
-      for(String partKVP : partKVPs){
-        String[] partKV = partKVP.split("=");
-        if(partKV.length == 2) {
-          String partKey = partKV[0].trim();
-          partitionKeys.add(partKey);
-          partitions.put(partKey, partKV[1].trim());
+        partitionKeys = new ArrayList<String>();
+        partitions = new HashMap<String, String>();
+        if (partSpecs != null && !partSpecs.trim().isEmpty()) {
+            String[] partKVPs = partSpecs.split(",");
+            for (String partKVP : partKVPs) {
+                String[] partKV = partKVP.split("=");
+                if (partKV.length == 2) {
+                    String partKey = partKV[0].trim();
+                    partitionKeys.add(partKey);
+                    partitions.put(partKey, partKV[1].trim());
+                } else {
+                    throw new FrontendException("Invalid partition column specification. " + partSpecs, PigHCatUtil.PIG_EXCEPTION_CODE);
+                }
+            }
+        }
+
+        if (schema != null) {
+            pigSchema = Utils.getSchemaFromString(schema);
+        }
+
+    }
+
+    @Override
+    public void checkSchema(ResourceSchema resourceSchema) throws IOException {
+
+        /*  Schema provided by user and the schema computed by Pig
+        * at the time of calling store must match.
+        */
+        Schema runtimeSchema = Schema.getPigSchema(resourceSchema);
+        if (pigSchema != null) {
+            if (!Schema.equals(runtimeSchema, pigSchema, false, true)) {
+                throw new FrontendException("Schema provided in store statement doesn't match with the Schema" +
+                    "returned by Pig run-time. Schema provided in HCatStorer: " + pigSchema.toString() + " Schema received from Pig runtime: " + runtimeSchema.toString(), PigHCatUtil.PIG_EXCEPTION_CODE);
+            }
         } else {
-          throw new FrontendException("Invalid partition column specification. "+partSpecs, PigHCatUtil.PIG_EXCEPTION_CODE);
+            pigSchema = runtimeSchema;
         }
-      }
+        UDFContext.getUDFContext().getUDFProperties(this.getClass(), new String[]{sign}).setProperty(PIG_SCHEMA, ObjectSerializer.serialize(pigSchema));
     }
 
-    if(schema != null) {
-      pigSchema = Utils.getSchemaFromString(schema);
-    }
-
-  }
-
-  @Override
-  public void checkSchema(ResourceSchema resourceSchema) throws IOException {
-
-    /*  Schema provided by user and the schema computed by Pig
-     * at the time of calling store must match.
+    /** Constructs an HCatSchema from pigSchema. The passed tableSchema is the existing
+     * schema of the table in the metastore.
      */
-    Schema runtimeSchema = Schema.getPigSchema(resourceSchema);
-    if(pigSchema != null){
-      if(! Schema.equals(runtimeSchema, pigSchema, false, true) ){
-        throw new FrontendException("Schema provided in store statement doesn't match with the Schema" +
-            "returned by Pig run-time. Schema provided in HCatStorer: "+pigSchema.toString()+ " Schema received from Pig runtime: "+runtimeSchema.toString(), PigHCatUtil.PIG_EXCEPTION_CODE);
-      }
-    } else {
-      pigSchema = runtimeSchema;
-    }
-    UDFContext.getUDFContext().getUDFProperties(this.getClass(), new String[]{sign}).setProperty(PIG_SCHEMA,ObjectSerializer.serialize(pigSchema));
-  }
+    protected HCatSchema convertPigSchemaToHCatSchema(Schema pigSchema, HCatSchema tableSchema) throws FrontendException {
+        List<HCatFieldSchema> fieldSchemas = new ArrayList<HCatFieldSchema>(pigSchema.size());
+        for (FieldSchema fSchema : pigSchema.getFields()) {
+            try {
+                HCatFieldSchema hcatFieldSchema = getColFromSchema(fSchema.alias, tableSchema);
 
-  /** Constructs HCatSchema from pigSchema. Passed tableSchema is the existing
-   * schema of the table in metastore.
-   */
-  protected HCatSchema convertPigSchemaToHCatSchema(Schema pigSchema, HCatSchema tableSchema) throws FrontendException{
-    List<HCatFieldSchema> fieldSchemas = new ArrayList<HCatFieldSchema>(pigSchema.size());
-    for(FieldSchema fSchema : pigSchema.getFields()){
-      try {
-        HCatFieldSchema hcatFieldSchema = getColFromSchema(fSchema.alias, tableSchema);
-
-        fieldSchemas.add(getHCatFSFromPigFS(fSchema, hcatFieldSchema));
-      } catch (HCatException he){
-          throw new FrontendException(he.getMessage(),PigHCatUtil.PIG_EXCEPTION_CODE,he);
-      }
-    }
-    return new HCatSchema(fieldSchemas);
-  }
-
-  public static boolean removeTupleFromBag(HCatFieldSchema hcatFieldSchema, FieldSchema bagFieldSchema) throws HCatException{
-    if (hcatFieldSchema != null && hcatFieldSchema.getArrayElementSchema().get(0).getType() != Type.STRUCT) {
-      return true;
-    }
-    // Column was not found in table schema. Its a new column
-    List<FieldSchema> tupSchema = bagFieldSchema.schema.getFields();
-    if (hcatFieldSchema == null && tupSchema.size() == 1 && (tupSchema.get(0).schema == null || (tupSchema.get(0).type == DataType.TUPLE && tupSchema.get(0).schema.size() == 1))) {
-      return true;
-    }
-    return false;
-  }
-
-
-  private HCatFieldSchema getHCatFSFromPigFS(FieldSchema fSchema, HCatFieldSchema hcatFieldSchema) throws FrontendException, HCatException{
-    byte type = fSchema.type;
-    switch(type){
-
-    case DataType.CHARARRAY:
-    case DataType.BIGCHARARRAY:
-      return new HCatFieldSchema(fSchema.alias, Type.STRING, null);
-
-    case DataType.INTEGER:
-      if (hcatFieldSchema != null) {
-        if (!SUPPORTED_INTEGER_CONVERSIONS.contains(hcatFieldSchema.getType())) {
-          throw new FrontendException("Unsupported type: " + type + "  in Pig's schema",
-            PigHCatUtil.PIG_EXCEPTION_CODE);
+                fieldSchemas.add(getHCatFSFromPigFS(fSchema, hcatFieldSchema));
+            } catch (HCatException he) {
+                throw new FrontendException(he.getMessage(), PigHCatUtil.PIG_EXCEPTION_CODE, he);
+            }
         }
-        return new HCatFieldSchema(fSchema.alias, hcatFieldSchema.getType(), null);
-      } else {
-        return new HCatFieldSchema(fSchema.alias, Type.INT, null);
-      }
-
-    case DataType.LONG:
-      return new HCatFieldSchema(fSchema.alias, Type.BIGINT, null);
-
-    case DataType.FLOAT:
-      return new HCatFieldSchema(fSchema.alias, Type.FLOAT, null);
-
-    case DataType.DOUBLE:
-      return new HCatFieldSchema(fSchema.alias, Type.DOUBLE, null);
-
-    case DataType.BYTEARRAY:
-      return new HCatFieldSchema(fSchema.alias, Type.BINARY, null);
-
-    case DataType.BAG:
-      Schema bagSchema = fSchema.schema;
-      List<HCatFieldSchema> arrFields = new ArrayList<HCatFieldSchema>(1);
-      FieldSchema field;
-      // Find out if we need to throw away the tuple or not.
-      if (removeTupleFromBag(hcatFieldSchema, fSchema)) {
-        field = bagSchema.getField(0).schema.getField(0);
-      } else {
-        field = bagSchema.getField(0);
-      }
-      arrFields.add(getHCatFSFromPigFS(field, hcatFieldSchema == null ? null : hcatFieldSchema.getArrayElementSchema().get(0)));
-      return new HCatFieldSchema(fSchema.alias, Type.ARRAY, new HCatSchema(arrFields), "");
-
-    case DataType.TUPLE:
-      List<String> fieldNames = new ArrayList<String>();
-      List<HCatFieldSchema> hcatFSs = new ArrayList<HCatFieldSchema>();
-      HCatSchema structSubSchema = hcatFieldSchema == null ? null : hcatFieldSchema.getStructSubSchema();
-      List<FieldSchema> fields = fSchema.schema.getFields();
-      for (int i = 0; i < fields.size(); i++) {
-        FieldSchema fieldSchema = fields.get(i);
-        fieldNames.add(fieldSchema.alias);
-        hcatFSs.add(getHCatFSFromPigFS(fieldSchema, structSubSchema == null ? null : structSubSchema.get(i)));
-      }
-      return new HCatFieldSchema(fSchema.alias, Type.STRUCT, new HCatSchema(hcatFSs), "");
-
-    case DataType.MAP:{
-      // Pig's schema contain no type information about map's keys and
-      // values. So, if its a new column assume <string,string> if its existing
-      // return whatever is contained in the existing column.
-
-      HCatFieldSchema valFS;
-      List<HCatFieldSchema> valFSList = new ArrayList<HCatFieldSchema>(1);
-
-      if(hcatFieldSchema != null){
-        return new HCatFieldSchema(fSchema.alias, Type.MAP, Type.STRING, hcatFieldSchema.getMapValueSchema(), "");
-      }
-
-      // Column not found in target table. Its a new column. Its schema is map<string,string>
-      valFS = new HCatFieldSchema(fSchema.alias, Type.STRING, "");
-      valFSList.add(valFS);
-      return new HCatFieldSchema(fSchema.alias,Type.MAP,Type.STRING, new HCatSchema(valFSList),"");
-     }
-
-    default:
-      throw new FrontendException("Unsupported type: "+type+"  in Pig's schema", PigHCatUtil.PIG_EXCEPTION_CODE);
-    }
-  }
-
-  @Override
-  public void prepareToWrite(RecordWriter writer) throws IOException {
-    this.writer = writer;
-    computedSchema = (HCatSchema)ObjectSerializer.deserialize(UDFContext.getUDFContext().getUDFProperties(this.getClass(), new String[]{sign}).getProperty(COMPUTED_OUTPUT_SCHEMA));
-  }
-
-  @Override
-  public void putNext(Tuple tuple) throws IOException {
-
-    List<Object> outgoing = new ArrayList<Object>(tuple.size());
-
-    int i = 0;
-    for(HCatFieldSchema fSchema : computedSchema.getFields()){
-      outgoing.add(getJavaObj(tuple.get(i++), fSchema));
-    }
-    try {
-      writer.write(null, new DefaultHCatRecord(outgoing));
-    } catch (InterruptedException e) {
-      throw new BackendException("Error while writing tuple: "+tuple, PigHCatUtil.PIG_EXCEPTION_CODE, e);
-    }
-  }
-
-  private Object getJavaObj(Object pigObj, HCatFieldSchema hcatFS) throws HCatException, BackendException{
-    try {
-
-      // The real work-horse. Spend time and energy in this method if there is
-      // need to keep HCatStorer lean and go fast.
-      Type type = hcatFS.getType();
-      switch(type){
-
-      case BINARY:
-        if (pigObj == null) {
-          return null;
-        }          
-        return ((DataByteArray)pigObj).get();
-
-      case STRUCT:
-        if (pigObj == null) {
-          return null;
-        }
-        HCatSchema structSubSchema = hcatFS.getStructSubSchema();
-        // Unwrap the tuple.
-        List<Object> all = ((Tuple)pigObj).getAll();
-        ArrayList<Object> converted = new ArrayList<Object>(all.size());
-        for (int i = 0; i < all.size(); i++) {
-          converted.add(getJavaObj(all.get(i), structSubSchema.get(i)));
-        }
-        return converted;
-
-      case ARRAY:
-        if (pigObj == null) {
-          return null;
-        }
-        // Unwrap the bag.
-        DataBag pigBag = (DataBag)pigObj;
-        HCatFieldSchema tupFS = hcatFS.getArrayElementSchema().get(0);
-        boolean needTuple = tupFS.getType() == Type.STRUCT;
-        List<Object> bagContents = new ArrayList<Object>((int)pigBag.size());
-        Iterator<Tuple> bagItr = pigBag.iterator();
-
-        while(bagItr.hasNext()){
-          // If there is only one element in tuple contained in bag, we throw away the tuple.
-          bagContents.add(getJavaObj(needTuple ? bagItr.next() : bagItr.next().get(0), tupFS));
-
-        }
-        return bagContents;
-      case MAP:
-        if (pigObj == null) {
-          return null;
-        }
-        Map<?,?> pigMap = (Map<?,?>)pigObj;
-        Map<Object,Object> typeMap = new HashMap<Object, Object>();
-        for(Entry<?, ?> entry: pigMap.entrySet()){
-          // the value has a schema and not a FieldSchema
-          typeMap.put(
-              // Schema validation enforces that the Key is a String
-              (String)entry.getKey(),
-              getJavaObj(entry.getValue(), hcatFS.getMapValueSchema().get(0)));
-        }
-        return typeMap;
-      case STRING:
-      case INT:
-      case BIGINT:
-      case FLOAT:
-      case DOUBLE:
-        return pigObj;
-      case SMALLINT:
-        if (pigObj == null) {
-          return null;
-        }
-        if ((Integer) pigObj < Short.MIN_VALUE || (Integer) pigObj > Short.MAX_VALUE) {
-          throw new BackendException("Value " + pigObj + " is outside the bounds of column " +
-              hcatFS.getName() + " with type " + hcatFS.getType(), PigHCatUtil.PIG_EXCEPTION_CODE);
-        }
-        return ((Integer) pigObj).shortValue();
-      case TINYINT:
-        if (pigObj == null) {
-          return null;
-        }
-        if ((Integer) pigObj < Byte.MIN_VALUE || (Integer) pigObj > Byte.MAX_VALUE) {
-          throw new BackendException("Value " + pigObj + " is outside the bounds of column " +
-              hcatFS.getName() + " with type " + hcatFS.getType(), PigHCatUtil.PIG_EXCEPTION_CODE);
-        }
-        return ((Integer) pigObj).byteValue();
-      case BOOLEAN:
-        // would not pass schema validation anyway
-        throw new BackendException("Incompatible type "+type+" found in hcat table schema: "+hcatFS, PigHCatUtil.PIG_EXCEPTION_CODE);
-      default:
-        throw new BackendException("Unexpected type "+type+" for value "+pigObj + (pigObj == null ? "" : " of class " + pigObj.getClass().getName()), PigHCatUtil.PIG_EXCEPTION_CODE);
-      }
-    } catch (BackendException e) {
-      // provide the path to the field in the error message
-      throw new BackendException(
-          (hcatFS.getName() == null ? " " : hcatFS.getName()+".") + e.getMessage(),
-          e.getCause() == null ? e : e.getCause());
-    }
-  }
-
-  @Override
-  public String relToAbsPathForStoreLocation(String location, Path curDir) throws IOException {
-
-    // Need to necessarily override this method since default impl assumes HDFS
-    // based location string.
-    return location;
-  }
-
-  @Override
-  public void setStoreFuncUDFContextSignature(String signature) {
-    sign = signature;
-  }
-
-
-  protected void doSchemaValidations(Schema pigSchema, HCatSchema tblSchema) throws FrontendException, HCatException{
-
-    // Iterate through all the elements in Pig Schema and do validations as
-    // dictated by semantics, consult HCatSchema of table when need be.
-
-    for(FieldSchema pigField : pigSchema.getFields()){
-      HCatFieldSchema hcatField = getColFromSchema(pigField.alias, tblSchema);
-      validateSchema(pigField, hcatField);
+        return new HCatSchema(fieldSchemas);
     }
 
-    try {
-      PigHCatUtil.validateHCatTableSchemaFollowsPigRules(tblSchema);
-    } catch (IOException e) {
-      throw new FrontendException("HCatalog schema is not compatible with Pig: "+e.getMessage(),  PigHCatUtil.PIG_EXCEPTION_CODE, e);
-    }
-  }
-
-
-  private void validateSchema(FieldSchema pigField, HCatFieldSchema hcatField)
-      throws HCatException, FrontendException {
-    validateAlias(pigField.alias);
-    byte type = pigField.type;
-    if(DataType.isComplex(type)){
-      switch(type){
-
-      case DataType.MAP:
-        if(hcatField != null){
-          if(hcatField.getMapKeyType() != Type.STRING){
-            throw new FrontendException("Key Type of map must be String "+hcatField,  PigHCatUtil.PIG_EXCEPTION_CODE);
-          }
-          // Map values can be primitive or complex
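+    // Decides whether the tuple wrapping each bag element should be dropped when the bag is mapped to an HCat array.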
+    public static boolean removeTupleFromBag(HCatFieldSchema hcatFieldSchema, FieldSchema bagFieldSchema) throws HCatException {
+        if (hcatFieldSchema != null && hcatFieldSchema.getArrayElementSchema().get(0).getType() != Type.STRUCT) {
+            return true;
         }
-        break;
-
-      case DataType.BAG:
-        HCatSchema arrayElementSchema = hcatField == null ? null : hcatField.getArrayElementSchema();
-        for(FieldSchema innerField : pigField.schema.getField(0).schema.getFields()){
-          validateSchema(innerField, getColFromSchema(pigField.alias, arrayElementSchema));
+        // Column was not found in the table schema. It's a new column.
+        List<FieldSchema> tupSchema = bagFieldSchema.schema.getFields();
+        if (hcatFieldSchema == null && tupSchema.size() == 1 && (tupSchema.get(0).schema == null || (tupSchema.get(0).type == DataType.TUPLE && tupSchema.get(0).schema.size() == 1))) {
+            return true;
         }
-        break;
+        return false;
+    }
 
-      case DataType.TUPLE:
-        HCatSchema structSubSchema = hcatField == null ? null : hcatField.getStructSubSchema();
-        for(FieldSchema innerField : pigField.schema.getFields()){
-          validateSchema(innerField, getColFromSchema(pigField.alias, structSubSchema));
+
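+    // Maps a single Pig field to an HCatFieldSchema, consulting the existing table column (if any) to resolve types.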
+    private HCatFieldSchema getHCatFSFromPigFS(FieldSchema fSchema, HCatFieldSchema hcatFieldSchema) throws FrontendException, HCatException {
+        byte type = fSchema.type;
+        switch (type) {
+
+        case DataType.CHARARRAY:
+        case DataType.BIGCHARARRAY:
+            return new HCatFieldSchema(fSchema.alias, Type.STRING, null);
+
+        case DataType.INTEGER:
+            if (hcatFieldSchema != null) {
+                if (!SUPPORTED_INTEGER_CONVERSIONS.contains(hcatFieldSchema.getType())) {
+                    throw new FrontendException("Unsupported type: " + type + "  in Pig's schema",
+                        PigHCatUtil.PIG_EXCEPTION_CODE);
+                }
+                return new HCatFieldSchema(fSchema.alias, hcatFieldSchema.getType(), null);
+            } else {
+                return new HCatFieldSchema(fSchema.alias, Type.INT, null);
+            }
+
+        case DataType.LONG:
+            return new HCatFieldSchema(fSchema.alias, Type.BIGINT, null);
+
+        case DataType.FLOAT:
+            return new HCatFieldSchema(fSchema.alias, Type.FLOAT, null);
+
+        case DataType.DOUBLE:
+            return new HCatFieldSchema(fSchema.alias, Type.DOUBLE, null);
+
+        case DataType.BYTEARRAY:
+            return new HCatFieldSchema(fSchema.alias, Type.BINARY, null);
+
+        case DataType.BAG:
+            Schema bagSchema = fSchema.schema;
+            List<HCatFieldSchema> arrFields = new ArrayList<HCatFieldSchema>(1);
+            FieldSchema field;
+            // Find out if we need to throw away the tuple or not.
+            if (removeTupleFromBag(hcatFieldSchema, fSchema)) {
+                field = bagSchema.getField(0).schema.getField(0);
+            } else {
+                field = bagSchema.getField(0);
+            }
+            arrFields.add(getHCatFSFromPigFS(field, hcatFieldSchema == null ? null : hcatFieldSchema.getArrayElementSchema().get(0)));
+            return new HCatFieldSchema(fSchema.alias, Type.ARRAY, new HCatSchema(arrFields), "");
+
+        case DataType.TUPLE:
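+            // A Pig tuple maps to an HCat struct; convert each inner field recursively.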
+            List<String> fieldNames = new ArrayList<String>();
+            List<HCatFieldSchema> hcatFSs = new ArrayList<HCatFieldSchema>();
+            HCatSchema structSubSchema = hcatFieldSchema == null ? null : hcatFieldSchema.getStructSubSchema();
+            List<FieldSchema> fields = fSchema.schema.getFields();
+            for (int i = 0; i < fields.size(); i++) {
+                FieldSchema fieldSchema = fields.get(i);
+                fieldNames.add(fieldSchema.alias);
+                hcatFSs.add(getHCatFSFromPigFS(fieldSchema, structSubSchema == null ? null : structSubSchema.get(i)));
+            }
+            return new HCatFieldSchema(fSchema.alias, Type.STRUCT, new HCatSchema(hcatFSs), "");
+
+        case DataType.MAP: {
+            // Pig's schema contains no type information about a map's keys and
+            // values. So, if it's a new column, assume <string,string>; if it's an
+            // existing column, return whatever is contained in the existing column.
+
+            HCatFieldSchema valFS;
+            List<HCatFieldSchema> valFSList = new ArrayList<HCatFieldSchema>(1);
+
+            if (hcatFieldSchema != null) {
+                return new HCatFieldSchema(fSchema.alias, Type.MAP, Type.STRING, hcatFieldSchema.getMapValueSchema(), "");
+            }
+
+            // Column not found in the target table. It's a new column whose schema is map<string,string>.
+            valFS = new HCatFieldSchema(fSchema.alias, Type.STRING, "");
+            valFSList.add(valFS);
+            return new HCatFieldSchema(fSchema.alias, Type.MAP, Type.STRING, new HCatSchema(valFSList), "");
         }
-        break;
 
-      default:
-        throw new FrontendException("Internal Error.", PigHCatUtil.PIG_EXCEPTION_CODE);
-      }
-    }
-  }
-
-  private void validateAlias(String alias) throws FrontendException{
-    if(alias == null) {
-      throw new FrontendException("Column name for a field is not specified. Please provide the full schema as an argument to HCatStorer.", PigHCatUtil.PIG_EXCEPTION_CODE);
-    }
-    if(alias.matches(".*[A-Z]+.*")) {
-      throw new FrontendException("Column names should all be in lowercase. Invalid name found: "+alias, PigHCatUtil.PIG_EXCEPTION_CODE);
-    }
-  }
-
-  // Finds column by name in HCatSchema, if not found returns null.
-  private HCatFieldSchema getColFromSchema(String alias, HCatSchema tblSchema){
-    if (tblSchema != null) {
-      for(HCatFieldSchema hcatField : tblSchema.getFields()){
-        if(hcatField!=null && hcatField.getName()!= null && hcatField.getName().equalsIgnoreCase(alias)){
-          return hcatField;
+        default:
+            throw new FrontendException("Unsupported type: " + type + "  in Pig's schema", PigHCatUtil.PIG_EXCEPTION_CODE);
         }
-      }
     }
-    // Its a new column
-    return null;
-  }
 
-  @Override
-  public void cleanupOnFailure(String location, Job job) throws IOException {
-    // No-op.
-  }
+    @Override
+    public void prepareToWrite(RecordWriter writer) throws IOException {
+        this.writer = writer;
+        computedSchema = (HCatSchema) ObjectSerializer.deserialize(UDFContext.getUDFContext().getUDFProperties(this.getClass(), new String[]{sign}).getProperty(COMPUTED_OUTPUT_SCHEMA));
+    }
 
-  @Override
-  public void storeStatistics(ResourceStatistics stats, String arg1, Job job) throws IOException {
-  }
+    @Override
+    public void putNext(Tuple tuple) throws IOException {
+
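+        // Convert each field of the Pig tuple to its HCat representation and write the resulting record.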
+        List<Object> outgoing = new ArrayList<Object>(tuple.size());
+
+        int i = 0;
+        for (HCatFieldSchema fSchema : computedSchema.getFields()) {
+            outgoing.add(getJavaObj(tuple.get(i++), fSchema));
+        }
+        try {
+            writer.write(null, new DefaultHCatRecord(outgoing));
+        } catch (InterruptedException e) {
+            throw new BackendException("Error while writing tuple: " + tuple, PigHCatUtil.PIG_EXCEPTION_CODE, e);
+        }
+    }
+
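+    // Converts a Pig object to the Java representation expected by HCatRecord for the given HCat field type.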
+    private Object getJavaObj(Object pigObj, HCatFieldSchema hcatFS) throws HCatException, BackendException {
+        try {
+
+            // The real work-horse. Spend time and energy in this method if there is
+            // a need to keep HCatStorer lean and fast.
+            Type type = hcatFS.getType();
+            switch (type) {
+
+            case BINARY:
+                if (pigObj == null) {
+                    return null;
+                }
+                return ((DataByteArray) pigObj).get();
+
+            case STRUCT:
+                if (pigObj == null) {
+                    return null;
+                }
+                HCatSchema structSubSchema = hcatFS.getStructSubSchema();
+                // Unwrap the tuple.
+                List<Object> all = ((Tuple) pigObj).getAll();
+                ArrayList<Object> converted = new ArrayList<Object>(all.size());
+                for (int i = 0; i < all.size(); i++) {
+                    converted.add(getJavaObj(all.get(i), structSubSchema.get(i)));
+                }
+                return converted;
+
+            case ARRAY:
+                if (pigObj == null) {
+                    return null;
+                }
+                // Unwrap the bag.
+                DataBag pigBag = (DataBag) pigObj;
+                HCatFieldSchema tupFS = hcatFS.getArrayElementSchema().get(0);
+                boolean needTuple = tupFS.getType() == Type.STRUCT;
+                List<Object> bagContents = new ArrayList<Object>((int) pigBag.size());
+                Iterator<Tuple> bagItr = pigBag.iterator();
+
+                while (bagItr.hasNext()) {
+                    // If there is only one element in the tuple contained in the bag, we throw away the tuple.
+                    bagContents.add(getJavaObj(needTuple ? bagItr.next() : bagItr.next().get(0), tupFS));
+
+                }
+                return bagContents;
+            case MAP:
+                if (pigObj == null) {
+                    return null;
+                }
+                Map<?, ?> pigMap = (Map<?, ?>) pigObj;
+                Map<Object, Object> typeMap = new HashMap<Object, Object>();
+                for (Entry<?, ?> entry : pigMap.entrySet()) {
+                    // the value has a schema and not a FieldSchema
+                    typeMap.put(
+                        // Schema validation enforces that the Key is a String
+                        (String) entry.getKey(),
+                        getJavaObj(entry.getValue(), hcatFS.getMapValueSchema().get(0)));
+                }
+                return typeMap;
+            case STRING:
+            case INT:
+            case BIGINT:
+            case FLOAT:
+            case DOUBLE:
+                return pigObj;
+            case SMALLINT:
+                if (pigObj == null) {
+                    return null;
+                }
+                if ((Integer) pigObj < Short.MIN_VALUE || (Integer) pigObj > Short.MAX_VALUE) {
+                    throw new BackendException("Value " + pigObj + " is outside the bounds of column " +
+                        hcatFS.getName() + " with type " + hcatFS.getType(), PigHCatUtil.PIG_EXCEPTION_CODE);
+                }
+                return ((Integer) pigObj).shortValue();
+            case TINYINT:
+                if (pigObj == null) {
+                    return null;
+                }
+                if ((Integer) pigObj < Byte.MIN_VALUE || (Integer) pigObj > Byte.MAX_VALUE) {
+                    throw new BackendException("Value " + pigObj + " is outside the bounds of column " +
+                        hcatFS.getName() + " with type " + hcatFS.getType(), PigHCatUtil.PIG_EXCEPTION_CODE);
+                }
+                return ((Integer) pigObj).byteValue();
+            case BOOLEAN:
+                // would not pass schema validation anyway
+                throw new BackendException("Incompatible type " + type + " found in hcat table schema: " + hcatFS, PigHCatUtil.PIG_EXCEPTION_CODE);
+            default:
+                throw new BackendException("Unexpected type " + type + " for value " + pigObj + (pigObj == null ? "" : " of class " + pigObj.getClass().getName()), PigHCatUtil.PIG_EXCEPTION_CODE);
+            }
+        } catch (BackendException e) {
+            // provide the path to the field in the error message
+            throw new BackendException(
+                (hcatFS.getName() == null ? " " : hcatFS.getName() + ".") + e.getMessage(),
+                e.getCause() == null ? e : e.getCause());
+        }
+    }
+
+    @Override
+    public String relToAbsPathForStoreLocation(String location, Path curDir) throws IOException {
+
+        // Need to necessarily override this method since default impl assumes HDFS
+        // based location string.
+        return location;
+    }
+
+    @Override
+    public void setStoreFuncUDFContextSignature(String signature) {
+        sign = signature;
+    }
+
+
+    protected void doSchemaValidations(Schema pigSchema, HCatSchema tblSchema) throws FrontendException, HCatException {
+
+        // Iterate through all the elements in the Pig schema and do validations as
+        // dictated by semantics, consulting the table's HCatSchema when needed.
+
+        for (FieldSchema pigField : pigSchema.getFields()) {
+            HCatFieldSchema hcatField = getColFromSchema(pigField.alias, tblSchema);
+            validateSchema(pigField, hcatField);
+        }
+
+        try {
+            PigHCatUtil.validateHCatTableSchemaFollowsPigRules(tblSchema);
+        } catch (IOException e) {
+            throw new FrontendException("HCatalog schema is not compatible with Pig: " + e.getMessage(), PigHCatUtil.PIG_EXCEPTION_CODE, e);
+        }
+    }
+
+
+    private void validateSchema(FieldSchema pigField, HCatFieldSchema hcatField)
+        throws HCatException, FrontendException {
+        validateAlias(pigField.alias);
+        byte type = pigField.type;
+        if (DataType.isComplex(type)) {
+            switch (type) {
+
+            case DataType.MAP:
+                if (hcatField != null) {
+                    if (hcatField.getMapKeyType() != Type.STRING) {
+                        throw new FrontendException("Key Type of map must be String " + hcatField, PigHCatUtil.PIG_EXCEPTION_CODE);
+                    }
+                    // Map values can be primitive or complex
+                }
+                break;
+
+            case DataType.BAG:
+                HCatSchema arrayElementSchema = hcatField == null ? null : hcatField.getArrayElementSchema();
+                for (FieldSchema innerField : pigField.schema.getField(0).schema.getFields()) {
+                    validateSchema(innerField, getColFromSchema(pigField.alias, arrayElementSchema));
+                }
+                break;
+
+            case DataType.TUPLE:
+                HCatSchema structSubSchema = hcatField == null ? null : hcatField.getStructSubSchema();
+                for (FieldSchema innerField : pigField.schema.getFields()) {
+                    validateSchema(innerField, getColFromSchema(pigField.alias, structSubSchema));
+                }
+                break;
+
+            default:
+                throw new FrontendException("Internal Error.", PigHCatUtil.PIG_EXCEPTION_CODE);
+            }
+        }
+    }
+
+    private void validateAlias(String alias) throws FrontendException {
+        if (alias == null) {
+            throw new FrontendException("Column name for a field is not specified. Please provide the full schema as an argument to HCatStorer.", PigHCatUtil.PIG_EXCEPTION_CODE);
+        }
+        if (alias.matches(".*[A-Z]+.*")) {
+            throw new FrontendException("Column names should all be in lowercase. Invalid name found: " + alias, PigHCatUtil.PIG_EXCEPTION_CODE);
+        }
+    }
+
+    // Finds a column by name in the HCatSchema; returns null if not found.
+    private HCatFieldSchema getColFromSchema(String alias, HCatSchema tblSchema) {
+        if (tblSchema != null) {
+            for (HCatFieldSchema hcatField : tblSchema.getFields()) {
+                if (hcatField != null && hcatField.getName() != null && hcatField.getName().equalsIgnoreCase(alias)) {
+                    return hcatField;
+                }
+            }
+        }
+        // It's a new column.
+        return null;
+    }
+
+    @Override
+    public void cleanupOnFailure(String location, Job job) throws IOException {
+        // No-op.
+    }
+
+    @Override
+    public void storeStatistics(ResourceStatistics stats, String arg1, Job job) throws IOException {
+    }
 }
diff --git a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatLoader.java b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatLoader.java
index 2e05a80..2b001f8 100644
--- a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatLoader.java
+++ b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatLoader.java
@@ -51,59 +51,59 @@
 
 public class HCatLoader extends HCatBaseLoader {
 
-  private static final String PARTITION_FILTER = "partition.filter"; // for future use
+    private static final String PARTITION_FILTER = "partition.filter"; // for future use
 
-  private HCatInputFormat hcatInputFormat = null;
-  private String dbName;
-  private String tableName;
-  private String hcatServerUri;
-  private String partitionFilterString;
-  private final PigHCatUtil phutil = new PigHCatUtil();
+    private HCatInputFormat hcatInputFormat = null;
+    private String dbName;
+    private String tableName;
+    private String hcatServerUri;
+    private String partitionFilterString;
+    private final PigHCatUtil phutil = new PigHCatUtil();
 
-  // Signature for wrapped loader, see comments in LoadFuncBasedInputDriver.initialize
-  final public static String INNER_SIGNATURE = "hcatloader.inner.signature";
-  final public static String INNER_SIGNATURE_PREFIX = "hcatloader_inner_signature";
-  // A hash map which stores job credentials. The key is a signature passed by Pig, which is
-  //unique to the load func and input file name (table, in our case).
-  private static Map<String, Credentials> jobCredentials = new HashMap<String, Credentials>();
+    // Signature for wrapped loader, see comments in LoadFuncBasedInputDriver.initialize
+    final public static String INNER_SIGNATURE = "hcatloader.inner.signature";
+    final public static String INNER_SIGNATURE_PREFIX = "hcatloader_inner_signature";
+    // A hash map which stores job credentials. The key is a signature passed by Pig, which is
+    // unique to the load func and input file name (table, in our case).
+    private static Map<String, Credentials> jobCredentials = new HashMap<String, Credentials>();
 
-  @Override
-  public InputFormat<?,?> getInputFormat() throws IOException {
-    if(hcatInputFormat == null) {
-      hcatInputFormat = new HCatInputFormat();
+    @Override
+    public InputFormat<?, ?> getInputFormat() throws IOException {
+        if (hcatInputFormat == null) {
+            hcatInputFormat = new HCatInputFormat();
+        }
+        return hcatInputFormat;
     }
-    return hcatInputFormat;
-  }
 
-  @Override
-  public String relativeToAbsolutePath(String location, Path curDir) throws IOException {
-    return location;
-  }
+    @Override
+    public String relativeToAbsolutePath(String location, Path curDir) throws IOException {
+        return location;
+    }
 
-@Override
-  public void setLocation(String location, Job job) throws IOException {
+    @Override
+    public void setLocation(String location, Job job) throws IOException {
 
-    UDFContext udfContext = UDFContext.getUDFContext();
-    Properties udfProps = udfContext.getUDFProperties(this.getClass(),
-        new String[]{signature});
-    job.getConfiguration().set(INNER_SIGNATURE, INNER_SIGNATURE_PREFIX + "_" + signature);
-    Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
-    dbName = dbTablePair.first;
-    tableName = dbTablePair.second;
+        UDFContext udfContext = UDFContext.getUDFContext();
+        Properties udfProps = udfContext.getUDFProperties(this.getClass(),
+            new String[]{signature});
+        job.getConfiguration().set(INNER_SIGNATURE, INNER_SIGNATURE_PREFIX + "_" + signature);
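+        // The load location is of the form [db.]table; split it into database and table names.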
+        Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
+        dbName = dbTablePair.first;
+        tableName = dbTablePair.second;
 
-    RequiredFieldList requiredFieldsInfo = (RequiredFieldList) udfProps
-    .get(PRUNE_PROJECTION_INFO);
-    // get partitionFilterString stored in the UDFContext - it would have
-    // been stored there by an earlier call to setPartitionFilter
-    // call setInput on HCatInputFormat only in the frontend because internally
-    // it makes calls to the hcat server - we don't want these to happen in
-    // the backend
-    // in the hadoop front end mapred.task.id property will not be set in
-    // the Configuration
+        RequiredFieldList requiredFieldsInfo = (RequiredFieldList) udfProps
+            .get(PRUNE_PROJECTION_INFO);
+        // Get the partitionFilterString stored in the UDFContext - it would have
+        // been stored there by an earlier call to setPartitionFilter.
+        // Call setInput on HCatInputFormat only in the frontend, because internally
+        // it makes calls to the hcat server - we don't want these to happen in
+        // the backend.
+        // In the hadoop frontend, the mapred.task.id property will not be set in
+        // the Configuration.
         if (udfProps.containsKey(HCatConstants.HCAT_PIG_LOADER_LOCATION_SET)) {
-            for( Enumeration<Object> emr = udfProps.keys();emr.hasMoreElements();) {
+            for (Enumeration<Object> emr = udfProps.keys(); emr.hasMoreElements(); ) {
                 PigHCatUtil.getConfigFromUDFProperties(udfProps,
-                            job.getConfiguration(), emr.nextElement().toString());
+                    job.getConfiguration(), emr.nextElement().toString());
             }
             if (!HCatUtil.checkJobContextIfRunningFromBackend(job)) {
                 //Combine credentials and credentials from job takes precedence for freshness
@@ -114,12 +114,12 @@
         } else {
             Job clone = new Job(job.getConfiguration());
             HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
-                    tableName, getPartitionFilterString()));
+                tableName, getPartitionFilterString()));
 
             // We will store all the new/changed properties in the job in the
             // udf context, so the HCatInputFormat.setInput method need not
             // be called many times.
-            for (Entry<String,String> keyValue : job.getConfiguration()) {
+            for (Entry<String, String> keyValue : job.getConfiguration()) {
                 String oldValue = clone.getConfiguration().getRaw(keyValue.getKey());
                 if ((oldValue == null) || (keyValue.getValue().equals(oldValue) == false)) {
                     udfProps.put(keyValue.getKey(), keyValue.getValue());
@@ -144,129 +144,129 @@
         // here will ensure we communicate to HCatInputFormat about pruned
         // projections at getSplits() and createRecordReader() time
 
-        if(requiredFieldsInfo != null) {
-          // convert to hcatschema and pass to HCatInputFormat
-          try {
-            outputSchema = phutil.getHCatSchema(requiredFieldsInfo.getFields(),signature,this.getClass());
-            HCatInputFormat.setOutputSchema(job, outputSchema);
-          } catch (Exception e) {
-            throw new IOException(e);
-          }
-        } else{
-          // else - this means pig's optimizer never invoked the pushProjection
-          // method - so we need all fields and hence we should not call the
-          // setOutputSchema on HCatInputFormat
-          if (HCatUtil.checkJobContextIfRunningFromBackend(job)){
+        if (requiredFieldsInfo != null) {
+            // convert to hcatschema and pass to HCatInputFormat
             try {
-              HCatSchema hcatTableSchema = (HCatSchema) udfProps.get(HCatConstants.HCAT_TABLE_SCHEMA);
-              outputSchema = hcatTableSchema;
-              HCatInputFormat.setOutputSchema(job, outputSchema);
+                outputSchema = phutil.getHCatSchema(requiredFieldsInfo.getFields(), signature, this.getClass());
+                HCatInputFormat.setOutputSchema(job, outputSchema);
             } catch (Exception e) {
-              throw new IOException(e);
+                throw new IOException(e);
             }
-          }
+        } else {
+            // else - this means Pig's optimizer never invoked the pushProjection
+            // method - so we need all fields and hence should not call
+            // setOutputSchema on HCatInputFormat
+            if (HCatUtil.checkJobContextIfRunningFromBackend(job)) {
+                try {
+                    HCatSchema hcatTableSchema = (HCatSchema) udfProps.get(HCatConstants.HCAT_TABLE_SCHEMA);
+                    outputSchema = hcatTableSchema;
+                    HCatInputFormat.setOutputSchema(job, outputSchema);
+                } catch (Exception e) {
+                    throw new IOException(e);
+                }
+            }
         }
 
-  }
+    }
 
-  @Override
-  public String[] getPartitionKeys(String location, Job job)
-  throws IOException {
-    Table table = phutil.getTable(location,
-        hcatServerUri!=null?hcatServerUri:PigHCatUtil.getHCatServerUri(job),
+    @Override
+    public String[] getPartitionKeys(String location, Job job)
+        throws IOException {
+        Table table = phutil.getTable(location,
+            hcatServerUri != null ? hcatServerUri : PigHCatUtil.getHCatServerUri(job),
             PigHCatUtil.getHCatServerPrincipal(job));
-    List<FieldSchema> tablePartitionKeys = table.getPartitionKeys();
-    String[] partitionKeys = new String[tablePartitionKeys.size()];
-    for(int i = 0; i < tablePartitionKeys.size(); i++) {
-      partitionKeys[i] = tablePartitionKeys.get(i).getName();
+        List<FieldSchema> tablePartitionKeys = table.getPartitionKeys();
+        String[] partitionKeys = new String[tablePartitionKeys.size()];
+        for (int i = 0; i < tablePartitionKeys.size(); i++) {
+            partitionKeys[i] = tablePartitionKeys.get(i).getName();
+        }
+        return partitionKeys;
     }
-    return partitionKeys;
-  }
 
-  @Override
-  public ResourceSchema getSchema(String location, Job job) throws IOException {
-    HCatContext.getInstance().mergeConf(job.getConfiguration());
-    HCatContext.getInstance().getConf().setBoolean(
-        HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION, true);
+    @Override
+    public ResourceSchema getSchema(String location, Job job) throws IOException {
+        HCatContext.getInstance().mergeConf(job.getConfiguration());
+        HCatContext.getInstance().getConf().setBoolean(
+            HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION, true);
 
-    Table table = phutil.getTable(location,
-        hcatServerUri!=null?hcatServerUri:PigHCatUtil.getHCatServerUri(job),
+        Table table = phutil.getTable(location,
+            hcatServerUri != null ? hcatServerUri : PigHCatUtil.getHCatServerUri(job),
             PigHCatUtil.getHCatServerPrincipal(job));
-    HCatSchema hcatTableSchema = HCatUtil.getTableSchemaWithPtnCols(table);
-    try {
-      PigHCatUtil.validateHCatTableSchemaFollowsPigRules(hcatTableSchema);
-    } catch (IOException e){
-      throw new PigException(
-          "Table schema incompatible for reading through HCatLoader :" + e.getMessage()
-          + ";[Table schema was "+ hcatTableSchema.toString() +"]"
-          ,PigHCatUtil.PIG_EXCEPTION_CODE, e);
+        HCatSchema hcatTableSchema = HCatUtil.getTableSchemaWithPtnCols(table);
+        try {
+            PigHCatUtil.validateHCatTableSchemaFollowsPigRules(hcatTableSchema);
+        } catch (IOException e) {
+            throw new PigException(
+                "Table schema incompatible for reading through HCatLoader :" + e.getMessage()
+                    + ";[Table schema was " + hcatTableSchema.toString() + "]"
+                , PigHCatUtil.PIG_EXCEPTION_CODE, e);
+        }
+        storeInUDFContext(signature, HCatConstants.HCAT_TABLE_SCHEMA, hcatTableSchema);
+        outputSchema = hcatTableSchema;
+        return PigHCatUtil.getResourceSchema(hcatTableSchema);
     }
-    storeInUDFContext(signature, HCatConstants.HCAT_TABLE_SCHEMA, hcatTableSchema);
-    outputSchema = hcatTableSchema;
-    return PigHCatUtil.getResourceSchema(hcatTableSchema);
-  }
 
-  @Override
-  public void setPartitionFilter(Expression partitionFilter) throws IOException {
-    // convert the partition filter expression into a string expected by
-    // hcat and pass it in setLocation()
+    @Override
+    public void setPartitionFilter(Expression partitionFilter) throws IOException {
+        // convert the partition filter expression into a string expected by
+        // hcat and pass it in setLocation()
 
-    partitionFilterString = getHCatComparisonString(partitionFilter);
+        partitionFilterString = getHCatComparisonString(partitionFilter);
 
-    // store this in the udf context so we can get it later
-    storeInUDFContext(signature,
-        PARTITION_FILTER, partitionFilterString);
-  }
-
-  /**
-   * Get statistics about the data to be loaded. Only input data size is implemented at this time.
-   */
-  @Override
-  public ResourceStatistics getStatistics(String location, Job job) throws IOException {
-    try {
-      ResourceStatistics stats = new ResourceStatistics();
-      InputJobInfo inputJobInfo = (InputJobInfo) HCatUtil.deserialize(
-          job.getConfiguration().get(HCatConstants.HCAT_KEY_JOB_INFO));
-      stats.setmBytes(getSizeInBytes(inputJobInfo) / 1024 / 1024);
-      return stats;
-    } catch (Exception e) {
-      throw new IOException(e);
+        // store this in the udf context so we can get it later
+        storeInUDFContext(signature,
+            PARTITION_FILTER, partitionFilterString);
     }
-  }
 
-  private String getPartitionFilterString() {
-    if(partitionFilterString == null) {
-      Properties props = UDFContext.getUDFContext().getUDFProperties(
-          this.getClass(), new String[] {signature});
-      partitionFilterString = props.getProperty(PARTITION_FILTER);
+    /**
+     * Get statistics about the data to be loaded. Only input data size is implemented at this time.
+     */
+    @Override
+    public ResourceStatistics getStatistics(String location, Job job) throws IOException {
+        try {
+            ResourceStatistics stats = new ResourceStatistics();
+            InputJobInfo inputJobInfo = (InputJobInfo) HCatUtil.deserialize(
+                job.getConfiguration().get(HCatConstants.HCAT_KEY_JOB_INFO));
+            stats.setmBytes(getSizeInBytes(inputJobInfo) / 1024 / 1024);
+            return stats;
+        } catch (Exception e) {
+            throw new IOException(e);
+        }
     }
-    return partitionFilterString;
-  }
 
-  private String getHCatComparisonString(Expression expr) {
-    if(expr instanceof BinaryExpression){
-      // call getHCatComparisonString on lhs and rhs, and and join the
-      // results with OpType string
-
-      // we can just use OpType.toString() on all Expression types except
-      // Equal, NotEqualt since Equal has '==' in toString() and
-      // we need '='
-      String opStr = null;
-      switch(expr.getOpType()){
-        case OP_EQ :
-          opStr = " = ";
-          break;
-        default:
-          opStr = expr.getOpType().toString();
-      }
-      BinaryExpression be = (BinaryExpression)expr;
-      return "(" + getHCatComparisonString(be.getLhs()) +
-                  opStr +
-                  getHCatComparisonString(be.getRhs()) + ")";
-    } else {
-      // should be a constant or column
-      return expr.toString();
+    private String getPartitionFilterString() {
+        if (partitionFilterString == null) {
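+            // Not cached yet; retrieve the filter string stored in the UDF context by setPartitionFilter.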
+            Properties props = UDFContext.getUDFContext().getUDFProperties(
+                this.getClass(), new String[]{signature});
+            partitionFilterString = props.getProperty(PARTITION_FILTER);
+        }
+        return partitionFilterString;
     }
-  }
+
+    private String getHCatComparisonString(Expression expr) {
+        if (expr instanceof BinaryExpression) {
+            // call getHCatComparisonString on lhs and rhs, and join the
+            // results with the OpType string
+
+            // we can just use OpType.toString() on all Expression types except
+            // Equal, NotEqual since Equal has '==' in toString() and
+            // we need '='
+            String opStr = null;
+            switch (expr.getOpType()) {
+            case OP_EQ:
+                opStr = " = ";
+                break;
+            default:
+                opStr = expr.getOpType().toString();
+            }
+            BinaryExpression be = (BinaryExpression) expr;
+            return "(" + getHCatComparisonString(be.getLhs()) +
+                opStr +
+                getHCatComparisonString(be.getRhs()) + ")";
+        } else {
+            // should be a constant or column
+            return expr.toString();
+        }
+    }
 
 }
diff --git a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatStorer.java b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatStorer.java
index 9f5b4a2..c46db33 100644
--- a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatStorer.java
+++ b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatStorer.java
@@ -50,114 +50,114 @@
 
 public class HCatStorer extends HCatBaseStorer {
 
-  // Signature for wrapped storer, see comments in LoadFuncBasedInputDriver.initialize
-  final public static String INNER_SIGNATURE = "hcatstorer.inner.signature";
-  final public static String INNER_SIGNATURE_PREFIX = "hcatstorer_inner_signature";
-  // A hash map which stores job credentials. The key is a signature passed by Pig, which is
-  //unique to the store func and out file name (table, in our case).
-  private static Map<String, Credentials> jobCredentials = new HashMap<String, Credentials>();
+    // Signature for wrapped storer, see comments in LoadFuncBasedInputDriver.initialize
+    final public static String INNER_SIGNATURE = "hcatstorer.inner.signature";
+    final public static String INNER_SIGNATURE_PREFIX = "hcatstorer_inner_signature";
+    // A hash map which stores job credentials. The key is a signature passed by Pig, which is
+    // unique to the store func and out file name (table, in our case).
+    private static Map<String, Credentials> jobCredentials = new HashMap<String, Credentials>();
 
 
-  public HCatStorer(String partSpecs, String schema) throws Exception {
-    super(partSpecs, schema);
-  }
-
-  public HCatStorer(String partSpecs) throws Exception {
-    this(partSpecs, null);
-  }
-
-  public HCatStorer() throws Exception{
-    this(null,null);
-  }
-
-  @Override
-  public OutputFormat getOutputFormat() throws IOException {
-    return new HCatOutputFormat();
-  }
-
-  @Override
-  public void setStoreLocation(String location, Job job) throws IOException {
-    HCatContext.getInstance().mergeConf(job.getConfiguration());
-    HCatContext.getInstance().getConf().setBoolean(
-        HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION, false);
-
-    Configuration config = job.getConfiguration();
-    config.set(INNER_SIGNATURE, INNER_SIGNATURE_PREFIX + "_" + sign);
-    Properties udfProps = UDFContext.getUDFContext().getUDFProperties(
-            this.getClass(), new String[] { sign });
-    String[] userStr = location.split("\\.");
-
-    if (udfProps.containsKey(HCatConstants.HCAT_PIG_STORER_LOCATION_SET)) {
-      for(Enumeration<Object> emr = udfProps.keys();emr.hasMoreElements();){
-        PigHCatUtil.getConfigFromUDFProperties(udfProps, config, emr.nextElement().toString());
-      }
-      Credentials crd = jobCredentials.get(INNER_SIGNATURE_PREFIX + "_" + sign);
-      if (crd != null) {
-        job.getCredentials().addAll(crd);
-      }
-    } else {
-      Job clone = new Job(job.getConfiguration());
-      OutputJobInfo outputJobInfo;
-      if (userStr.length == 2) {
-        outputJobInfo = OutputJobInfo.create(userStr[0], userStr[1], partitions);
-      } else if (userStr.length == 1) {
-        outputJobInfo = OutputJobInfo.create(null, userStr[0], partitions);
-      } else {
-        throw new FrontendException("location " + location
-              + " is invalid. It must be of the form [db.]table",
-              PigHCatUtil.PIG_EXCEPTION_CODE);
-      }
-      Schema schema = (Schema) ObjectSerializer.deserialize(udfProps.getProperty(PIG_SCHEMA));
-      if (schema != null) {
-        pigSchema = schema;
-      }
-      if (pigSchema == null) {
-        throw new FrontendException(
-            "Schema for data cannot be determined.",
-            PigHCatUtil.PIG_EXCEPTION_CODE);
-      }
-      try {
-        HCatOutputFormat.setOutput(job, outputJobInfo);
-      } catch (HCatException he) {
-        // pass the message to the user - essentially something about
-        // the table
-        // information passed to HCatOutputFormat was not right
-        throw new PigException(he.getMessage(),
-            PigHCatUtil.PIG_EXCEPTION_CODE, he);
-      }
-      HCatSchema hcatTblSchema = HCatOutputFormat.getTableSchema(job);
-      try {
-        doSchemaValidations(pigSchema, hcatTblSchema);
-      } catch (HCatException he) {
-        throw new FrontendException(he.getMessage(), PigHCatUtil.PIG_EXCEPTION_CODE, he);
-      }
-      computedSchema = convertPigSchemaToHCatSchema(pigSchema, hcatTblSchema);
-      HCatOutputFormat.setSchema(job, computedSchema);
-      udfProps.setProperty(COMPUTED_OUTPUT_SCHEMA,ObjectSerializer.serialize(computedSchema));
-
-      // We will store all the new /changed properties in the job in the
-      // udf context, so the the HCatOutputFormat.setOutput and setSchema
-      // methods need not be called many times.
-      for ( Entry<String,String> keyValue : job.getConfiguration()) {
-        String oldValue = clone.getConfiguration().getRaw(keyValue.getKey());
-        if ((oldValue == null) || (keyValue.getValue().equals(oldValue) == false)) {
-          udfProps.put(keyValue.getKey(), keyValue.getValue());
-        }
-      }
-      //Store credentials in a private hash map and not the udf context to
-      // make sure they are not public.
-      jobCredentials.put(INNER_SIGNATURE_PREFIX + "_" + sign,job.getCredentials());
-      udfProps.put(HCatConstants.HCAT_PIG_STORER_LOCATION_SET, true);
+    public HCatStorer(String partSpecs, String schema) throws Exception {
+        super(partSpecs, schema);
     }
-  }
 
-  @Override
-  public void storeSchema(ResourceSchema schema, String arg1, Job job) throws IOException {
-    HCatHadoopShims.Instance.get().commitJob(getOutputFormat(), schema, arg1, job);
-  }
+    public HCatStorer(String partSpecs) throws Exception {
+        this(partSpecs, null);
+    }
 
-  @Override
-  public void cleanupOnFailure(String location, Job job) throws IOException {
-      HCatHadoopShims.Instance.get().abortJob(getOutputFormat(), job);
-  }
+    public HCatStorer() throws Exception {
+        this(null, null);
+    }
+
+    @Override
+    public OutputFormat getOutputFormat() throws IOException {
+        return new HCatOutputFormat();
+    }
+
+    @Override
+    public void setStoreLocation(String location, Job job) throws IOException {
+        HCatContext.getInstance().mergeConf(job.getConfiguration());
+        HCatContext.getInstance().getConf().setBoolean(
+            HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION, false);
+
+        Configuration config = job.getConfiguration();
+        config.set(INNER_SIGNATURE, INNER_SIGNATURE_PREFIX + "_" + sign);
+        Properties udfProps = UDFContext.getUDFContext().getUDFProperties(
+            this.getClass(), new String[]{sign});
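+        // The store location is of the form [db.]table; split on '.' to separate the optional database name.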
+        String[] userStr = location.split("\\.");
+
+        if (udfProps.containsKey(HCatConstants.HCAT_PIG_STORER_LOCATION_SET)) {
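+            // The location was already configured in an earlier call; replay the cached configuration and credentials.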
+            for (Enumeration<Object> emr = udfProps.keys(); emr.hasMoreElements(); ) {
+                PigHCatUtil.getConfigFromUDFProperties(udfProps, config, emr.nextElement().toString());
+            }
+            Credentials crd = jobCredentials.get(INNER_SIGNATURE_PREFIX + "_" + sign);
+            if (crd != null) {
+                job.getCredentials().addAll(crd);
+            }
+        } else {
+            Job clone = new Job(job.getConfiguration());
+            OutputJobInfo outputJobInfo;
+            if (userStr.length == 2) {
+                outputJobInfo = OutputJobInfo.create(userStr[0], userStr[1], partitions);
+            } else if (userStr.length == 1) {
+                outputJobInfo = OutputJobInfo.create(null, userStr[0], partitions);
+            } else {
+                throw new FrontendException("location " + location
+                    + " is invalid. It must be of the form [db.]table",
+                    PigHCatUtil.PIG_EXCEPTION_CODE);
+            }
+            Schema schema = (Schema) ObjectSerializer.deserialize(udfProps.getProperty(PIG_SCHEMA));
+            if (schema != null) {
+                pigSchema = schema;
+            }
+            if (pigSchema == null) {
+                throw new FrontendException(
+                    "Schema for data cannot be determined.",
+                    PigHCatUtil.PIG_EXCEPTION_CODE);
+            }
+            try {
+                HCatOutputFormat.setOutput(job, outputJobInfo);
+            } catch (HCatException he) {
+                // pass the message to the user - essentially something about
+                // the table
+                // information passed to HCatOutputFormat was not right
+                throw new PigException(he.getMessage(),
+                    PigHCatUtil.PIG_EXCEPTION_CODE, he);
+            }
+            HCatSchema hcatTblSchema = HCatOutputFormat.getTableSchema(job);
+            try {
+                doSchemaValidations(pigSchema, hcatTblSchema);
+            } catch (HCatException he) {
+                throw new FrontendException(he.getMessage(), PigHCatUtil.PIG_EXCEPTION_CODE, he);
+            }
+            computedSchema = convertPigSchemaToHCatSchema(pigSchema, hcatTblSchema);
+            HCatOutputFormat.setSchema(job, computedSchema);
+            udfProps.setProperty(COMPUTED_OUTPUT_SCHEMA, ObjectSerializer.serialize(computedSchema));
+
+            // We will store all the new/changed properties in the job in the
+            // udf context, so the HCatOutputFormat.setOutput and setSchema
+            // methods need not be called many times.
+            for (Entry<String, String> keyValue : job.getConfiguration()) {
+                String oldValue = clone.getConfiguration().getRaw(keyValue.getKey());
+                if ((oldValue == null) || (keyValue.getValue().equals(oldValue) == false)) {
+                    udfProps.put(keyValue.getKey(), keyValue.getValue());
+                }
+            }
+            // Store credentials in a private hash map and not the udf context to
+            // make sure they are not public.
+            jobCredentials.put(INNER_SIGNATURE_PREFIX + "_" + sign, job.getCredentials());
+            udfProps.put(HCatConstants.HCAT_PIG_STORER_LOCATION_SET, true);
+        }
+    }
+
+    @Override
+    public void storeSchema(ResourceSchema schema, String arg1, Job job) throws IOException {
+        HCatHadoopShims.Instance.get().commitJob(getOutputFormat(), schema, arg1, job);
+    }
+
+    @Override
+    public void cleanupOnFailure(String location, Job job) throws IOException {
+        HCatHadoopShims.Instance.get().abortJob(getOutputFormat(), job);
+    }
 }
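
A minimal sketch, not part of the patch, of the pattern described in the comments above: after HCatOutputFormat.setOutput and setSchema have run once at frontend time, only the configuration keys they changed (relative to a clone of the Job taken beforehand) are copied into the UDF-context properties, and credentials go into a private map rather than the public UDF context. The stand-alone class and helper name below are hypothetical, introduced only for illustration.

    import java.util.Map;
    import java.util.Properties;
    import org.apache.hadoop.conf.Configuration;

    // Hypothetical illustration of the "store only new/changed properties" pattern:
    // diff the configuration after setOutput/setSchema against a clone taken
    // beforehand and stash only the changed keys into the UDF-context properties.
    class ConfigDeltaSketch {
        static void copyChangedProperties(Configuration before, Configuration after, Properties udfProps) {
            for (Map.Entry<String, String> keyValue : after) {
                String oldValue = before.getRaw(keyValue.getKey());
                if (oldValue == null || !keyValue.getValue().equals(oldValue)) {
                    udfProps.put(keyValue.getKey(), keyValue.getValue());
                }
            }
        }
    }
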
diff --git a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java
index 0f74034..2407de5 100644
--- a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java
+++ b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java
@@ -56,388 +56,388 @@
 
 public class PigHCatUtil {
 
-  static final int PIG_EXCEPTION_CODE = 1115; // http://wiki.apache.org/pig/PigErrorHandlingFunctionalSpecification#Error_codes
-  private static final String DEFAULT_DB = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+    static final int PIG_EXCEPTION_CODE = 1115; // http://wiki.apache.org/pig/PigErrorHandlingFunctionalSpecification#Error_codes
+    private static final String DEFAULT_DB = MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
-  private final  Map<Pair<String,String>, Table> hcatTableCache =
-    new HashMap<Pair<String,String>, Table>();
+    private final Map<Pair<String, String>, Table> hcatTableCache =
+        new HashMap<Pair<String, String>, Table>();
 
-  private static final TupleFactory tupFac = TupleFactory.getInstance();
+    private static final TupleFactory tupFac = TupleFactory.getInstance();
 
-  static public Pair<String, String> getDBTableNames(String location) throws IOException {
-    // the location string will be of the form:
-    // <database name>.<table name> - parse it and
-    // communicate the information to HCatInputFormat
+    static public Pair<String, String> getDBTableNames(String location) throws IOException {
+        // the location string will be of the form:
+        // <database name>.<table name> - parse it and
+        // communicate the information to HCatInputFormat
 
-    try {
-      return HCatUtil.getDbAndTableName(location);
-    } catch (IOException e) {
-      String locationErrMsg = "The input location in load statement " +
-      "should be of the form " +
-      "<databasename>.<table name> or <table name>. Got " + location;
-      throw new PigException(locationErrMsg, PIG_EXCEPTION_CODE);
-    }
-  }
-
-  static public String getHCatServerUri(Job job) {
-
-    return job.getConfiguration().get(HiveConf.ConfVars.METASTOREURIS.varname);
-  }
-
-  static public String getHCatServerPrincipal(Job job) {
-
-    return job.getConfiguration().get(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-  }
-
-  private static HiveMetaStoreClient getHiveMetaClient(String serverUri,
-                                                       String serverKerberosPrincipal, Class<?> clazz) throws Exception {
-    HiveConf hiveConf = new HiveConf(clazz);
-
-    if (serverUri != null){
-      hiveConf.set("hive.metastore.local", "false");
-      hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, serverUri.trim());
+        try {
+            return HCatUtil.getDbAndTableName(location);
+        } catch (IOException e) {
+            String locationErrMsg = "The input location in load statement " +
+                "should be of the form " +
+                "<databasename>.<table name> or <table name>. Got " + location;
+            throw new PigException(locationErrMsg, PIG_EXCEPTION_CODE);
+        }
     }
 
-    if (serverKerberosPrincipal != null){
-      hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
-      hiveConf.setVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, serverKerberosPrincipal);
+    static public String getHCatServerUri(Job job) {
+
+        return job.getConfiguration().get(HiveConf.ConfVars.METASTOREURIS.varname);
     }
 
-    try {
-        return HCatUtil.getHiveClient(hiveConf);
-    } catch (Exception e){
-      throw new Exception("Could not instantiate a HiveMetaStoreClient connecting to server uri:["+serverUri+"]",e);
-    }
-  }
+    static public String getHCatServerPrincipal(Job job) {
 
-
-  HCatSchema getHCatSchema(List<RequiredField> fields, String signature, Class<?> classForUDFCLookup) throws IOException {
-    if(fields == null) {
-      return null;
+        return job.getConfiguration().get(HCatConstants.HCAT_METASTORE_PRINCIPAL);
     }
 
-    Properties props = UDFContext.getUDFContext().getUDFProperties(
-        classForUDFCLookup, new String[] {signature});
-    HCatSchema hcatTableSchema = (HCatSchema) props.get(HCatConstants.HCAT_TABLE_SCHEMA);
+    private static HiveMetaStoreClient getHiveMetaClient(String serverUri,
+                                                         String serverKerberosPrincipal, Class<?> clazz) throws Exception {
+        HiveConf hiveConf = new HiveConf(clazz);
 
-    ArrayList<HCatFieldSchema> fcols = new ArrayList<HCatFieldSchema>();
-    for(RequiredField rf: fields) {
-      fcols.add(hcatTableSchema.getFields().get(rf.getIndex()));
-    }
-    return new HCatSchema(fcols);
-  }
+        if (serverUri != null) {
+            hiveConf.set("hive.metastore.local", "false");
+            hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, serverUri.trim());
+        }
 
-  public Table getTable(String location, String hcatServerUri, String hcatServerPrincipal) throws IOException{
-    Pair<String, String> loc_server = new Pair<String,String>(location, hcatServerUri);
-    Table hcatTable = hcatTableCache.get(loc_server);
-    if(hcatTable != null){
-      return hcatTable;
+        if (serverKerberosPrincipal != null) {
+            hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
+            hiveConf.setVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, serverKerberosPrincipal);
+        }
+
+        try {
+            return HCatUtil.getHiveClient(hiveConf);
+        } catch (Exception e) {
+            throw new Exception("Could not instantiate a HiveMetaStoreClient connecting to server uri:[" + serverUri + "]", e);
+        }
     }
 
-    Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
-    String dbName = dbTablePair.first;
-    String tableName = dbTablePair.second;
-    Table table = null;
-    HiveMetaStoreClient client = null;
-    try {
-      client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class);
-      table = HCatUtil.getTable(client, dbName, tableName);
-    } catch (NoSuchObjectException nsoe){
-      throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE); // prettier error messages to frontend
-    } catch (Exception e) {
-      throw new IOException(e);
-    } finally {
-        HCatUtil.closeHiveClientQuietly(client);
-    }
-    hcatTableCache.put(loc_server, table);
-    return table;
-  }
 
-  public static ResourceSchema getResourceSchema(HCatSchema hcatSchema) throws IOException {
+    HCatSchema getHCatSchema(List<RequiredField> fields, String signature, Class<?> classForUDFCLookup) throws IOException {
+        if (fields == null) {
+            return null;
+        }
 
-    List<ResourceFieldSchema> rfSchemaList = new ArrayList<ResourceFieldSchema>();
-    for (HCatFieldSchema hfs : hcatSchema.getFields()){
-      ResourceFieldSchema rfSchema;
-      rfSchema = getResourceSchemaFromFieldSchema(hfs);
-      rfSchemaList.add(rfSchema);
-    }
-    ResourceSchema rSchema = new ResourceSchema();
-    rSchema.setFields(rfSchemaList.toArray(new ResourceFieldSchema[0]));
-    return rSchema;
+        Properties props = UDFContext.getUDFContext().getUDFProperties(
+            classForUDFCLookup, new String[]{signature});
+        HCatSchema hcatTableSchema = (HCatSchema) props.get(HCatConstants.HCAT_TABLE_SCHEMA);
 
-  }
-
-  private static ResourceFieldSchema getResourceSchemaFromFieldSchema(HCatFieldSchema hfs)
-      throws IOException {
-    ResourceFieldSchema rfSchema;
-    // if we are dealing with a bag or tuple column - need to worry about subschema
-    if(hfs.getType() == Type.STRUCT) {
-        rfSchema = new ResourceFieldSchema()
-          .setName(hfs.getName())
-          .setDescription(hfs.getComment())
-          .setType(getPigType( hfs))
-          .setSchema(getTupleSubSchema(hfs));
-    } else if(hfs.getType() == Type.ARRAY) {
-        rfSchema = new ResourceFieldSchema()
-          .setName(hfs.getName())
-          .setDescription(hfs.getComment())
-          .setType(getPigType( hfs))
-          .setSchema(getBagSubSchema(hfs));
-    } else {
-      rfSchema = new ResourceFieldSchema()
-          .setName(hfs.getName())
-          .setDescription(hfs.getComment())
-          .setType(getPigType( hfs))
-          .setSchema(null); // no munging inner-schemas
-    }
-    return rfSchema;
-  }
-
-  protected static ResourceSchema getBagSubSchema(HCatFieldSchema hfs) throws IOException {
-    // there are two cases - array<Type> and array<struct<...>>
-    // in either case the element type of the array is represented in a
-    // tuple field schema in the bag's field schema - the second case (struct)
-    // more naturally translates to the tuple - in the first case (array<Type>)
-    // we simulate the tuple by putting the single field in a tuple
-
-    Properties props = UDFContext.getUDFContext().getClientSystemProps();
-    String innerTupleName = HCatConstants.HCAT_PIG_INNER_TUPLE_NAME_DEFAULT;
-    if (props != null && props.containsKey(HCatConstants.HCAT_PIG_INNER_TUPLE_NAME)) {
-      innerTupleName = props.getProperty(HCatConstants.HCAT_PIG_INNER_TUPLE_NAME)
-          .replaceAll("FIELDNAME", hfs.getName());
-    }
-    String innerFieldName = HCatConstants.HCAT_PIG_INNER_FIELD_NAME_DEFAULT;
-    if (props != null && props.containsKey(HCatConstants.HCAT_PIG_INNER_FIELD_NAME)) {
-      innerFieldName = props.getProperty(HCatConstants.HCAT_PIG_INNER_FIELD_NAME)
-          .replaceAll("FIELDNAME", hfs.getName());
+        ArrayList<HCatFieldSchema> fcols = new ArrayList<HCatFieldSchema>();
+        for (RequiredField rf : fields) {
+            fcols.add(hcatTableSchema.getFields().get(rf.getIndex()));
+        }
+        return new HCatSchema(fcols);
     }
 
-    ResourceFieldSchema[] bagSubFieldSchemas = new ResourceFieldSchema[1];
-    bagSubFieldSchemas[0] = new ResourceFieldSchema().setName(innerTupleName)
-      .setDescription("The tuple in the bag")
-      .setType(DataType.TUPLE);
-    HCatFieldSchema arrayElementFieldSchema = hfs.getArrayElementSchema().get(0);
-    if(arrayElementFieldSchema.getType() == Type.STRUCT) {
-      bagSubFieldSchemas[0].setSchema(getTupleSubSchema(arrayElementFieldSchema));
-    } else if(arrayElementFieldSchema.getType() == Type.ARRAY) {
-      ResourceSchema s = new ResourceSchema();
-      List<ResourceFieldSchema> lrfs = Arrays.asList(getResourceSchemaFromFieldSchema(arrayElementFieldSchema));
-      s.setFields(lrfs.toArray(new ResourceFieldSchema[0]));
-      bagSubFieldSchemas[0].setSchema(s);
-    } else {
-      ResourceFieldSchema[] innerTupleFieldSchemas = new ResourceFieldSchema[1];
-      innerTupleFieldSchemas[0] = new ResourceFieldSchema().setName(innerFieldName)
-        .setDescription("The inner field in the tuple in the bag")
-        .setType(getPigType(arrayElementFieldSchema))
-        .setSchema(null); // the element type is not a tuple - so no subschema
-      bagSubFieldSchemas[0].setSchema(new ResourceSchema().setFields(innerTupleFieldSchemas));
-    }
-    ResourceSchema s = new ResourceSchema().setFields(bagSubFieldSchemas);
-    return s;
+    public Table getTable(String location, String hcatServerUri, String hcatServerPrincipal) throws IOException {
+        Pair<String, String> loc_server = new Pair<String, String>(location, hcatServerUri);
+        Table hcatTable = hcatTableCache.get(loc_server);
+        if (hcatTable != null) {
+            return hcatTable;
+        }
 
-  }
-
-  private static ResourceSchema getTupleSubSchema(HCatFieldSchema hfs) throws IOException {
-    // for each struct subfield, create equivalent ResourceFieldSchema
-    ResourceSchema s = new ResourceSchema();
-    List<ResourceFieldSchema> lrfs = new ArrayList<ResourceFieldSchema>();
-    for(HCatFieldSchema subField : hfs.getStructSubSchema().getFields()) {
-      lrfs.add(getResourceSchemaFromFieldSchema(subField));
-    }
-    s.setFields(lrfs.toArray(new ResourceFieldSchema[0]));
-    return s;
-  }
-
-/**
-   * @param hfs the field schema of the column
-   * @return corresponding pig type
-   * @throws IOException
-   */
-  static public byte getPigType(HCatFieldSchema hfs) throws IOException {
-    return getPigType(hfs.getType());
-  }
-
-  static public byte getPigType(Type type) throws IOException {
-    String errMsg;
-
-    if (type == Type.STRING){
-      return DataType.CHARARRAY;
+        Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
+        String dbName = dbTablePair.first;
+        String tableName = dbTablePair.second;
+        Table table = null;
+        HiveMetaStoreClient client = null;
+        try {
+            client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class);
+            table = HCatUtil.getTable(client, dbName, tableName);
+        } catch (NoSuchObjectException nsoe) {
+            throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE); // prettier error messages to frontend
+        } catch (Exception e) {
+            throw new IOException(e);
+        } finally {
+            HCatUtil.closeHiveClientQuietly(client);
+        }
+        hcatTableCache.put(loc_server, table);
+        return table;
     }
 
-    if ( (type == Type.INT) || (type == Type.SMALLINT) || (type == Type.TINYINT)){
-      return DataType.INTEGER;
+    public static ResourceSchema getResourceSchema(HCatSchema hcatSchema) throws IOException {
+
+        List<ResourceFieldSchema> rfSchemaList = new ArrayList<ResourceFieldSchema>();
+        for (HCatFieldSchema hfs : hcatSchema.getFields()) {
+            ResourceFieldSchema rfSchema;
+            rfSchema = getResourceSchemaFromFieldSchema(hfs);
+            rfSchemaList.add(rfSchema);
+        }
+        ResourceSchema rSchema = new ResourceSchema();
+        rSchema.setFields(rfSchemaList.toArray(new ResourceFieldSchema[0]));
+        return rSchema;
+
     }
 
-    if (type == Type.ARRAY){
-      return DataType.BAG;
+    private static ResourceFieldSchema getResourceSchemaFromFieldSchema(HCatFieldSchema hfs)
+        throws IOException {
+        ResourceFieldSchema rfSchema;
+        // if we are dealing with a bag or tuple column - need to worry about subschema
+        if (hfs.getType() == Type.STRUCT) {
+            rfSchema = new ResourceFieldSchema()
+                .setName(hfs.getName())
+                .setDescription(hfs.getComment())
+                .setType(getPigType(hfs))
+                .setSchema(getTupleSubSchema(hfs));
+        } else if (hfs.getType() == Type.ARRAY) {
+            rfSchema = new ResourceFieldSchema()
+                .setName(hfs.getName())
+                .setDescription(hfs.getComment())
+                .setType(getPigType(hfs))
+                .setSchema(getBagSubSchema(hfs));
+        } else {
+            rfSchema = new ResourceFieldSchema()
+                .setName(hfs.getName())
+                .setDescription(hfs.getComment())
+                .setType(getPigType(hfs))
+                .setSchema(null); // no munging inner-schemas
+        }
+        return rfSchema;
     }
 
-    if (type == Type.STRUCT){
-      return DataType.TUPLE;
+    protected static ResourceSchema getBagSubSchema(HCatFieldSchema hfs) throws IOException {
+        // there are two cases - array<Type> and array<struct<...>>
+        // in either case the element type of the array is represented in a
+        // tuple field schema in the bag's field schema - the second case (struct)
+        // more naturally translates to the tuple - in the first case (array<Type>)
+        // we simulate the tuple by putting the single field in a tuple
+
+        Properties props = UDFContext.getUDFContext().getClientSystemProps();
+        String innerTupleName = HCatConstants.HCAT_PIG_INNER_TUPLE_NAME_DEFAULT;
+        if (props != null && props.containsKey(HCatConstants.HCAT_PIG_INNER_TUPLE_NAME)) {
+            innerTupleName = props.getProperty(HCatConstants.HCAT_PIG_INNER_TUPLE_NAME)
+                .replaceAll("FIELDNAME", hfs.getName());
+        }
+        String innerFieldName = HCatConstants.HCAT_PIG_INNER_FIELD_NAME_DEFAULT;
+        if (props != null && props.containsKey(HCatConstants.HCAT_PIG_INNER_FIELD_NAME)) {
+            innerFieldName = props.getProperty(HCatConstants.HCAT_PIG_INNER_FIELD_NAME)
+                .replaceAll("FIELDNAME", hfs.getName());
+        }
+
+        ResourceFieldSchema[] bagSubFieldSchemas = new ResourceFieldSchema[1];
+        bagSubFieldSchemas[0] = new ResourceFieldSchema().setName(innerTupleName)
+            .setDescription("The tuple in the bag")
+            .setType(DataType.TUPLE);
+        HCatFieldSchema arrayElementFieldSchema = hfs.getArrayElementSchema().get(0);
+        if (arrayElementFieldSchema.getType() == Type.STRUCT) {
+            bagSubFieldSchemas[0].setSchema(getTupleSubSchema(arrayElementFieldSchema));
+        } else if (arrayElementFieldSchema.getType() == Type.ARRAY) {
+            ResourceSchema s = new ResourceSchema();
+            List<ResourceFieldSchema> lrfs = Arrays.asList(getResourceSchemaFromFieldSchema(arrayElementFieldSchema));
+            s.setFields(lrfs.toArray(new ResourceFieldSchema[0]));
+            bagSubFieldSchemas[0].setSchema(s);
+        } else {
+            ResourceFieldSchema[] innerTupleFieldSchemas = new ResourceFieldSchema[1];
+            innerTupleFieldSchemas[0] = new ResourceFieldSchema().setName(innerFieldName)
+                .setDescription("The inner field in the tuple in the bag")
+                .setType(getPigType(arrayElementFieldSchema))
+                .setSchema(null); // the element type is not a tuple - so no subschema
+            bagSubFieldSchemas[0].setSchema(new ResourceSchema().setFields(innerTupleFieldSchemas));
+        }
+        ResourceSchema s = new ResourceSchema().setFields(bagSubFieldSchemas);
+        return s;
+
     }
 
-    if (type == Type.MAP){
-      return DataType.MAP;
+    private static ResourceSchema getTupleSubSchema(HCatFieldSchema hfs) throws IOException {
+        // for each struct subfield, create equivalent ResourceFieldSchema
+        ResourceSchema s = new ResourceSchema();
+        List<ResourceFieldSchema> lrfs = new ArrayList<ResourceFieldSchema>();
+        for (HCatFieldSchema subField : hfs.getStructSubSchema().getFields()) {
+            lrfs.add(getResourceSchemaFromFieldSchema(subField));
+        }
+        s.setFields(lrfs.toArray(new ResourceFieldSchema[0]));
+        return s;
     }
 
-    if (type == Type.BIGINT){
-      return DataType.LONG;
+    /**
+     * @param hfs the field schema of the column
+     * @return corresponding pig type
+     * @throws IOException
+     */
+    static public byte getPigType(HCatFieldSchema hfs) throws IOException {
+        return getPigType(hfs.getType());
     }
 
-    if (type == Type.FLOAT){
-      return DataType.FLOAT;
+    static public byte getPigType(Type type) throws IOException {
+        String errMsg;
+
+        if (type == Type.STRING) {
+            return DataType.CHARARRAY;
+        }
+
+        if ((type == Type.INT) || (type == Type.SMALLINT) || (type == Type.TINYINT)) {
+            return DataType.INTEGER;
+        }
+
+        if (type == Type.ARRAY) {
+            return DataType.BAG;
+        }
+
+        if (type == Type.STRUCT) {
+            return DataType.TUPLE;
+        }
+
+        if (type == Type.MAP) {
+            return DataType.MAP;
+        }
+
+        if (type == Type.BIGINT) {
+            return DataType.LONG;
+        }
+
+        if (type == Type.FLOAT) {
+            return DataType.FLOAT;
+        }
+
+        if (type == Type.DOUBLE) {
+            return DataType.DOUBLE;
+        }
+
+        if (type == Type.BINARY) {
+            return DataType.BYTEARRAY;
+        }
+
+        if (type == Type.BOOLEAN) {
+            errMsg = "HCatalog column type 'BOOLEAN' is not supported in " +
+                "Pig as a column type";
+            throw new PigException(errMsg, PIG_EXCEPTION_CODE);
+        }
+
+        errMsg = "HCatalog column type '" + type.toString() + "' is not supported in Pig as a column type";
+        throw new PigException(errMsg, PIG_EXCEPTION_CODE);
     }
 
-    if (type == Type.DOUBLE){
-      return DataType.DOUBLE;
+    public static Tuple transformToTuple(HCatRecord hr, HCatSchema hs) throws Exception {
+        if (hr == null) {
+            return null;
+        }
+        return transformToTuple(hr.getAll(), hs);
     }
 
-    if (type == Type.BINARY){
-        return DataType.BYTEARRAY;
+    @SuppressWarnings("unchecked")
+    public static Object extractPigObject(Object o, HCatFieldSchema hfs) throws Exception {
+        Object result;
+        Type itemType = hfs.getType();
+        switch (itemType) {
+        case BINARY:
+            result = (o == null) ? null : new DataByteArray((byte[]) o);
+            break;
+        case STRUCT:
+            result = transformToTuple((List<Object>) o, hfs);
+            break;
+        case ARRAY:
+            result = transformToBag((List<? extends Object>) o, hfs);
+            break;
+        case MAP:
+            result = transformToPigMap((Map<String, Object>) o, hfs);
+            break;
+        default:
+            result = o;
+            break;
+        }
+        return result;
     }
 
-    if (type == Type.BOOLEAN){
-      errMsg = "HCatalog column type 'BOOLEAN' is not supported in " +
-      "Pig as a column type";
-      throw new PigException(errMsg, PIG_EXCEPTION_CODE);
+    public static Tuple transformToTuple(List<? extends Object> objList, HCatFieldSchema hfs) throws Exception {
+        try {
+            return transformToTuple(objList, hfs.getStructSubSchema());
+        } catch (Exception e) {
+            if (hfs.getType() != Type.STRUCT) {
+                throw new Exception("Expected Struct type, got " + hfs.getType(), e);
+            } else {
+                throw e;
+            }
+        }
     }
 
-    errMsg = "HCatalog column type '"+ type.toString() +"' is not supported in Pig as a column type";
-    throw new PigException(errMsg, PIG_EXCEPTION_CODE);
-  }
-
-  public static Tuple transformToTuple(HCatRecord hr, HCatSchema hs) throws Exception {
-    if (hr == null){
-      return null;
-    }
-    return transformToTuple(hr.getAll(),hs);
-  }
-
-  @SuppressWarnings("unchecked")
-  public static Object extractPigObject(Object o, HCatFieldSchema hfs) throws Exception {
-    Object result;
-    Type itemType = hfs.getType();
-    switch (itemType){
-    case BINARY:
-      result = (o == null) ? null : new DataByteArray((byte[])o);
-      break;
-    case STRUCT:
-      result = transformToTuple((List<Object>)o,hfs);
-      break;
-    case ARRAY:
-      result = transformToBag((List<? extends Object>) o,hfs);
-      break;
-    case MAP:
-      result = transformToPigMap((Map<String, Object>)o,hfs);
-      break;
-    default:
-      result = o;
-      break;
-    }
-    return result;
-  }
-
-  public static Tuple transformToTuple(List<? extends Object> objList, HCatFieldSchema hfs) throws Exception {
-      try {
-          return transformToTuple(objList,hfs.getStructSubSchema());
-      } catch (Exception e){
-          if (hfs.getType() != Type.STRUCT){
-              throw new Exception("Expected Struct type, got "+hfs.getType(), e);
-          } else {
-              throw e;
-          }
-      }
-  }
-
-  public static Tuple transformToTuple(List<? extends Object> objList, HCatSchema hs) throws Exception {
-    if (objList == null){
-      return null;
-    }
-    Tuple t = tupFac.newTuple(objList.size());
-    List<HCatFieldSchema> subFields = hs.getFields();
-    for (int i = 0; i < subFields.size(); i++){
-      t.set(i,extractPigObject(objList.get(i), subFields.get(i)));
-    }
-    return t;
-  }
-
-  public static Map<String,Object> transformToPigMap(Map<String,Object> map, HCatFieldSchema hfs) throws Exception {
-    if (map == null) {
-      return null;
+    public static Tuple transformToTuple(List<? extends Object> objList, HCatSchema hs) throws Exception {
+        if (objList == null) {
+            return null;
+        }
+        Tuple t = tupFac.newTuple(objList.size());
+        List<HCatFieldSchema> subFields = hs.getFields();
+        for (int i = 0; i < subFields.size(); i++) {
+            t.set(i, extractPigObject(objList.get(i), subFields.get(i)));
+        }
+        return t;
     }
 
-    Map<String,Object> result = new HashMap<String, Object>();
-    for (Entry<String, Object> entry : map.entrySet()) {
-      result.put(entry.getKey(), extractPigObject(entry.getValue(), hfs.getMapValueSchema().get(0)));
-    }
-    return result;
-  }
+    public static Map<String, Object> transformToPigMap(Map<String, Object> map, HCatFieldSchema hfs) throws Exception {
+        if (map == null) {
+            return null;
+        }
 
-  @SuppressWarnings("unchecked")
-  public static DataBag transformToBag(List<? extends Object> list, HCatFieldSchema hfs) throws Exception {
-    if (list == null){
-      return null;
+        Map<String, Object> result = new HashMap<String, Object>();
+        for (Entry<String, Object> entry : map.entrySet()) {
+            result.put(entry.getKey(), extractPigObject(entry.getValue(), hfs.getMapValueSchema().get(0)));
+        }
+        return result;
     }
 
-    HCatFieldSchema elementSubFieldSchema = hfs.getArrayElementSchema().getFields().get(0);
-    DataBag db = new DefaultDataBag();
-    for (Object o : list){
-      Tuple tuple;
-      if (elementSubFieldSchema.getType() == Type.STRUCT){
-        tuple = transformToTuple((List<Object>)o, elementSubFieldSchema);
-      } else {
-        // bags always contain tuples
-        tuple = tupFac.newTuple(extractPigObject(o, elementSubFieldSchema));
-      }
-      db.add(tuple);
+    @SuppressWarnings("unchecked")
+    public static DataBag transformToBag(List<? extends Object> list, HCatFieldSchema hfs) throws Exception {
+        if (list == null) {
+            return null;
+        }
+
+        HCatFieldSchema elementSubFieldSchema = hfs.getArrayElementSchema().getFields().get(0);
+        DataBag db = new DefaultDataBag();
+        for (Object o : list) {
+            Tuple tuple;
+            if (elementSubFieldSchema.getType() == Type.STRUCT) {
+                tuple = transformToTuple((List<Object>) o, elementSubFieldSchema);
+            } else {
+                // bags always contain tuples
+                tuple = tupFac.newTuple(extractPigObject(o, elementSubFieldSchema));
+            }
+            db.add(tuple);
+        }
+        return db;
     }
-    return db;
-  }
 
 
-  private static void validateHCatSchemaFollowsPigRules(HCatSchema tblSchema) throws PigException {
-    for(HCatFieldSchema hcatField : tblSchema.getFields()){
-      validateHcatFieldFollowsPigRules(hcatField);
+    private static void validateHCatSchemaFollowsPigRules(HCatSchema tblSchema) throws PigException {
+        for (HCatFieldSchema hcatField : tblSchema.getFields()) {
+            validateHcatFieldFollowsPigRules(hcatField);
+        }
     }
-  }
 
-  private static void validateHcatFieldFollowsPigRules(HCatFieldSchema hcatField) throws PigException {
-    try {
-      Type hType = hcatField.getType();
-      switch(hType){
-      case BOOLEAN:
-        throw new PigException("Incompatible type found in hcat table schema: "+hcatField, PigHCatUtil.PIG_EXCEPTION_CODE);
-      case ARRAY:
-        validateHCatSchemaFollowsPigRules(hcatField.getArrayElementSchema());
-        break;
-      case STRUCT:
-        validateHCatSchemaFollowsPigRules(hcatField.getStructSubSchema());
-        break;
-      case MAP:
-        // key is only string
-        validateHCatSchemaFollowsPigRules(hcatField.getMapValueSchema());
-        break;
-      }
-    } catch (HCatException e) {
-      throw new PigException("Incompatible type found in hcat table schema: "+hcatField, PigHCatUtil.PIG_EXCEPTION_CODE, e);
+    private static void validateHcatFieldFollowsPigRules(HCatFieldSchema hcatField) throws PigException {
+        try {
+            Type hType = hcatField.getType();
+            switch (hType) {
+            case BOOLEAN:
+                throw new PigException("Incompatible type found in hcat table schema: " + hcatField, PigHCatUtil.PIG_EXCEPTION_CODE);
+            case ARRAY:
+                validateHCatSchemaFollowsPigRules(hcatField.getArrayElementSchema());
+                break;
+            case STRUCT:
+                validateHCatSchemaFollowsPigRules(hcatField.getStructSubSchema());
+                break;
+            case MAP:
+                // map keys are always strings, so only the value schema needs validation
+                validateHCatSchemaFollowsPigRules(hcatField.getMapValueSchema());
+                break;
+            }
+        } catch (HCatException e) {
+            throw new PigException("Incompatible type found in hcat table schema: " + hcatField, PigHCatUtil.PIG_EXCEPTION_CODE, e);
+        }
     }
-  }
 
 
-  public static void validateHCatTableSchemaFollowsPigRules(HCatSchema hcatTableSchema) throws IOException {
-    validateHCatSchemaFollowsPigRules(hcatTableSchema);
-  }
-
-  public static void getConfigFromUDFProperties(Properties p, Configuration config, String propName) {
-    if(p.getProperty(propName) != null){
-      config.set(propName, p.getProperty(propName));
+    public static void validateHCatTableSchemaFollowsPigRules(HCatSchema hcatTableSchema) throws IOException {
+        validateHCatSchemaFollowsPigRules(hcatTableSchema);
     }
-  }
 
-  public static void saveConfigIntoUDFProperties(Properties p, Configuration config, String propName) {
-    if(config.get(propName) != null){
-      p.setProperty(propName, config.get(propName));
+    public static void getConfigFromUDFProperties(Properties p, Configuration config, String propName) {
+        if (p.getProperty(propName) != null) {
+            config.set(propName, p.getProperty(propName));
+        }
     }
-  }
+
+    public static void saveConfigIntoUDFProperties(Properties p, Configuration config, String propName) {
+        if (config.get(propName) != null) {
+            p.setProperty(propName, config.get(propName));
+        }
+    }
 
 }
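
A minimal sketch, not part of the patch, of how the helpers above behave, using only methods visible in this file. The database/table name and the column shapes in the comments are illustrative assumptions, and the sketch class is assumed to sit in the same org.apache.hcatalog.pig package as PigHCatUtil; the import paths are assumed for this HCatalog version.

    import org.apache.hcatalog.data.schema.HCatFieldSchema.Type; // assumed package
    import org.apache.pig.data.DataType;

    class PigHCatUtilSketch {
        static void demo() throws Exception {
            // getDBTableNames splits a "db.table" load location into its two parts
            // (a bare table name is also accepted, per the error message above).
            String dbName = PigHCatUtil.getDBTableNames("default.mytable").first;     // "default"
            String tableName = PigHCatUtil.getDBTableNames("default.mytable").second; // "mytable"

            // getPigType maps HCatalog column types onto Pig DataType constants.
            byte chararray = PigHCatUtil.getPigType(Type.STRING); // DataType.CHARARRAY
            byte bag = PigHCatUtil.getPigType(Type.ARRAY);        // DataType.BAG
            byte tuple = PigHCatUtil.getPigType(Type.STRUCT);     // DataType.TUPLE

            // Per getBagSubSchema above, an array<int> column "scores" is exposed to Pig
            // as  scores: {innertuple: (innerfield: int)}  - the single element is wrapped
            // in a tuple because bags always contain tuples - while an array<struct<...>>
            // column maps its struct fields directly onto that inner tuple.
        }
    }
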
diff --git a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/LoadFuncBasedInputFormat.java b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/LoadFuncBasedInputFormat.java
index 940e2c0..7028865 100644
--- a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/LoadFuncBasedInputFormat.java
+++ b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/LoadFuncBasedInputFormat.java
@@ -40,156 +40,156 @@
 /**
  * based on {@link org.apache.pig.builtin.PigStorage}
  */
-public class LoadFuncBasedInputFormat extends InputFormat<BytesWritable,Tuple> {
+public class LoadFuncBasedInputFormat extends InputFormat<BytesWritable, Tuple> {
 
-  private final LoadFunc loadFunc;
-  private static ResourceFieldSchema[] fields;
-
-  public LoadFuncBasedInputFormat(LoadFunc loadFunc, ResourceSchema dataSchema, String location, Configuration conf) throws IOException {
-
-    this.loadFunc = loadFunc;
-    fields = dataSchema.getFields();
-    
-    // Simulate the frontend call sequence for LoadFunc, in case LoadFunc need to store something into UDFContext (as JsonLoader does)
-    if (loadFunc instanceof LoadMetadata) {
-        ((LoadMetadata)loadFunc).getSchema(location, new Job(conf));
-    }
-  }
-
-  @Override
-  public RecordReader<BytesWritable, Tuple> createRecordReader(
-      InputSplit split, TaskAttemptContext taskContext) throws IOException,
-      InterruptedException {
-    RecordReader<BytesWritable,Tuple> reader = loadFunc.getInputFormat().createRecordReader(split, taskContext);
-    return new LoadFuncBasedRecordReader(reader, loadFunc);
-  }
-
-  @Override
-  public List<InputSplit> getSplits(JobContext jobContext) throws IOException,
-  InterruptedException {
-    try {
-      InputFormat<BytesWritable,Tuple> inpFormat = loadFunc.getInputFormat();
-      return inpFormat.getSplits(jobContext);
-
-    } catch (InterruptedException    e) {
-      throw new IOException(e);
-    }
-  }
-
-  static class LoadFuncBasedRecordReader extends RecordReader<BytesWritable, Tuple> {
-
-    private Tuple tupleFromDisk;
-    private final RecordReader<BytesWritable,Tuple> reader;
     private final LoadFunc loadFunc;
-    private final LoadCaster caster;
+    private static ResourceFieldSchema[] fields;
 
-     /**
-      * @param reader
-      * @param loadFunc
-      * @throws IOException
-      */
-     public LoadFuncBasedRecordReader(RecordReader<BytesWritable,Tuple> reader, LoadFunc loadFunc) throws IOException {
-       this.reader = reader;
-       this.loadFunc = loadFunc;
-       this.caster = loadFunc.getLoadCaster();
-     }
+    public LoadFuncBasedInputFormat(LoadFunc loadFunc, ResourceSchema dataSchema, String location, Configuration conf) throws IOException {
 
-     @Override
-     public void close() throws IOException {
-       reader.close();
-     }
+        this.loadFunc = loadFunc;
+        fields = dataSchema.getFields();
 
-     @Override
-     public BytesWritable getCurrentKey() throws IOException,
-     InterruptedException {
-       return null;
-     }
+        // Simulate the frontend call sequence for LoadFunc, in case LoadFunc needs to store something into UDFContext (as JsonLoader does)
+        if (loadFunc instanceof LoadMetadata) {
+            ((LoadMetadata) loadFunc).getSchema(location, new Job(conf));
+        }
+    }
 
-     @Override
-     public Tuple getCurrentValue() throws IOException, InterruptedException {
+    @Override
+    public RecordReader<BytesWritable, Tuple> createRecordReader(
+        InputSplit split, TaskAttemptContext taskContext) throws IOException,
+        InterruptedException {
+        RecordReader<BytesWritable, Tuple> reader = loadFunc.getInputFormat().createRecordReader(split, taskContext);
+        return new LoadFuncBasedRecordReader(reader, loadFunc);
+    }
 
-       for(int i = 0; i < tupleFromDisk.size(); i++) {
+    @Override
+    public List<InputSplit> getSplits(JobContext jobContext) throws IOException,
+        InterruptedException {
+        try {
+            InputFormat<BytesWritable, Tuple> inpFormat = loadFunc.getInputFormat();
+            return inpFormat.getSplits(jobContext);
 
-         Object data = tupleFromDisk.get(i);
-         
-         // We will do conversion for bytes only for now
-         if (data instanceof DataByteArray) {
-         
-             DataByteArray dba = (DataByteArray) data;
-    
-             if(dba == null) {
-               // PigStorage will insert nulls for empty fields.
-              tupleFromDisk.set(i, null);
-              continue;
+        } catch (InterruptedException e) {
+            throw new IOException(e);
+        }
+    }
+
+    static class LoadFuncBasedRecordReader extends RecordReader<BytesWritable, Tuple> {
+
+        private Tuple tupleFromDisk;
+        private final RecordReader<BytesWritable, Tuple> reader;
+        private final LoadFunc loadFunc;
+        private final LoadCaster caster;
+
+        /**
+         * @param reader
+         * @param loadFunc
+         * @throws IOException
+         */
+        public LoadFuncBasedRecordReader(RecordReader<BytesWritable, Tuple> reader, LoadFunc loadFunc) throws IOException {
+            this.reader = reader;
+            this.loadFunc = loadFunc;
+            this.caster = loadFunc.getLoadCaster();
+        }
+
+        @Override
+        public void close() throws IOException {
+            reader.close();
+        }
+
+        @Override
+        public BytesWritable getCurrentKey() throws IOException,
+            InterruptedException {
+            return null;
+        }
+
+        @Override
+        public Tuple getCurrentValue() throws IOException, InterruptedException {
+
+            for (int i = 0; i < tupleFromDisk.size(); i++) {
+
+                Object data = tupleFromDisk.get(i);
+
+                // We will do conversion for bytes only for now
+                if (data instanceof DataByteArray) {
+
+                    DataByteArray dba = (DataByteArray) data;
+
+                    if (dba == null) {
+                        // PigStorage will insert nulls for empty fields.
+                        tupleFromDisk.set(i, null);
+                        continue;
+                    }
+
+                    switch (fields[i].getType()) {
+
+                    case DataType.CHARARRAY:
+                        tupleFromDisk.set(i, caster.bytesToCharArray(dba.get()));
+                        break;
+
+                    case DataType.INTEGER:
+                        tupleFromDisk.set(i, caster.bytesToInteger(dba.get()));
+                        break;
+
+                    case DataType.FLOAT:
+                        tupleFromDisk.set(i, caster.bytesToFloat(dba.get()));
+                        break;
+
+                    case DataType.LONG:
+                        tupleFromDisk.set(i, caster.bytesToLong(dba.get()));
+                        break;
+
+                    case DataType.DOUBLE:
+                        tupleFromDisk.set(i, caster.bytesToDouble(dba.get()));
+                        break;
+
+                    case DataType.MAP:
+                        tupleFromDisk.set(i, caster.bytesToMap(dba.get()));
+                        break;
+
+                    case DataType.BAG:
+                        tupleFromDisk.set(i, caster.bytesToBag(dba.get(), fields[i]));
+                        break;
+
+                    case DataType.TUPLE:
+                        tupleFromDisk.set(i, caster.bytesToTuple(dba.get(), fields[i]));
+                        break;
+
+                    default:
+                        throw new IOException("Unknown Pig type in data: " + fields[i].getType());
+                    }
+                }
             }
-    
-             switch(fields[i].getType()) {
-    
-             case DataType.CHARARRAY:
-               tupleFromDisk.set(i, caster.bytesToCharArray(dba.get()));
-               break;
-    
-             case DataType.INTEGER:
-               tupleFromDisk.set(i, caster.bytesToInteger(dba.get()));
-               break;
-    
-             case DataType.FLOAT:
-               tupleFromDisk.set(i, caster.bytesToFloat(dba.get()));
-               break;
-    
-             case DataType.LONG:
-               tupleFromDisk.set(i, caster.bytesToLong(dba.get()));
-               break;
-    
-             case DataType.DOUBLE:
-               tupleFromDisk.set(i, caster.bytesToDouble(dba.get()));
-               break;
-    
-             case DataType.MAP:
-               tupleFromDisk.set(i, caster.bytesToMap(dba.get()));
-               break;
-    
-             case DataType.BAG:
-               tupleFromDisk.set(i, caster.bytesToBag(dba.get(), fields[i]));
-               break;
-    
-             case DataType.TUPLE:
-               tupleFromDisk.set(i, caster.bytesToTuple(dba.get(), fields[i]));
-               break;
-    
-             default:
-               throw new IOException("Unknown Pig type in data: "+fields[i].getType());
-             }
-           }
-       }
 
-       return tupleFromDisk;
-     }
+            return tupleFromDisk;
+        }
 
 
-     @Override
-     public void initialize(InputSplit split, TaskAttemptContext ctx)
-     throws IOException, InterruptedException {
+        @Override
+        public void initialize(InputSplit split, TaskAttemptContext ctx)
+            throws IOException, InterruptedException {
 
-       reader.initialize(split, ctx);
-       loadFunc.prepareToRead(reader, null);
-     }
+            reader.initialize(split, ctx);
+            loadFunc.prepareToRead(reader, null);
+        }
 
-     @Override
-     public boolean nextKeyValue() throws IOException, InterruptedException {
+        @Override
+        public boolean nextKeyValue() throws IOException, InterruptedException {
 
-       // even if we don't need any data from disk, we will need to call
-       // getNext() on pigStorage() so we know how many rows to emit in our
-       // final output - getNext() will eventually return null when it has
-       // read all disk data and we will know to stop emitting final output
-       tupleFromDisk = loadFunc.getNext();
-       return tupleFromDisk != null;
-     }
+            // even if we don't need any data from disk, we will need to call
+            // getNext() on pigStorage() so we know how many rows to emit in our
+            // final output - getNext() will eventually return null when it has
+            // read all disk data and we will know to stop emitting final output
+            tupleFromDisk = loadFunc.getNext();
+            return tupleFromDisk != null;
+        }
 
-     @Override
-     public float getProgress() throws IOException, InterruptedException {
-       return 0;
-     }
+        @Override
+        public float getProgress() throws IOException, InterruptedException {
+            return 0;
+        }
 
-  }
+    }
 }
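
A minimal sketch, not part of the patch, of wiring LoadFuncBasedInputFormat around a plain PigStorage loader, matching the constructor shown above; the delimiter, field name, and input path are illustrative assumptions.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hcatalog.pig.drivers.LoadFuncBasedInputFormat;
    import org.apache.pig.ResourceSchema;
    import org.apache.pig.ResourceSchema.ResourceFieldSchema;
    import org.apache.pig.builtin.PigStorage;
    import org.apache.pig.data.DataType;

    class LoadFuncBasedInputFormatSketch {
        static LoadFuncBasedInputFormat build() throws IOException {
            // One chararray field; the wrapped LoadFunc's LoadCaster converts the raw
            // DataByteArray bytes to this declared type in getCurrentValue() above.
            ResourceFieldSchema line = new ResourceFieldSchema()
                .setName("line")
                .setType(DataType.CHARARRAY);
            ResourceSchema dataSchema = new ResourceSchema()
                .setFields(new ResourceFieldSchema[]{line});

            // The constructor also replays the frontend getSchema() call for
            // LoadMetadata loaders, as the comment above notes.
            return new LoadFuncBasedInputFormat(
                new PigStorage("\t"), dataSchema, "/tmp/input", new Configuration());
        }
    }
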
diff --git a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/StoreFuncBasedOutputFormat.java b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/StoreFuncBasedOutputFormat.java
index b43c2d2..ba603eb 100644
--- a/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/StoreFuncBasedOutputFormat.java
+++ b/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/StoreFuncBasedOutputFormat.java
@@ -38,82 +38,84 @@
 import org.apache.pig.data.Tuple;
 
 public class StoreFuncBasedOutputFormat extends
-        OutputFormat<BytesWritable, Tuple> {
+    OutputFormat<BytesWritable, Tuple> {
 
     private final StoreFuncInterface storeFunc;
-    
+
     public StoreFuncBasedOutputFormat(StoreFuncInterface storeFunc) {
 
         this.storeFunc = storeFunc;
     }
-    
+
     @Override
     public void checkOutputSpecs(JobContext jobContext) throws IOException,
-            InterruptedException {
-        OutputFormat<BytesWritable,Tuple> outputFormat =  storeFunc.getOutputFormat();
+        InterruptedException {
+        OutputFormat<BytesWritable, Tuple> outputFormat = storeFunc.getOutputFormat();
         outputFormat.checkOutputSpecs(jobContext);
     }
 
     @Override
     public OutputCommitter getOutputCommitter(TaskAttemptContext ctx)
-            throws IOException, InterruptedException {
+        throws IOException, InterruptedException {
         String serializedJobInfo = ctx.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO);
-        OutputJobInfo outputJobInfo = (OutputJobInfo)HCatUtil.deserialize(serializedJobInfo);
+        OutputJobInfo outputJobInfo = (OutputJobInfo) HCatUtil.deserialize(serializedJobInfo);
         ResourceSchema rs = PigHCatUtil.getResourceSchema(outputJobInfo.getOutputSchema());
         String location = outputJobInfo.getLocation();
-        OutputFormat<BytesWritable,Tuple> outputFormat =  storeFunc.getOutputFormat();
+        OutputFormat<BytesWritable, Tuple> outputFormat = storeFunc.getOutputFormat();
         return new StoreFuncBasedOutputCommitter(storeFunc, outputFormat.getOutputCommitter(ctx), location, rs);
     }
 
     @Override
     public RecordWriter<BytesWritable, Tuple> getRecordWriter(
-            TaskAttemptContext ctx) throws IOException, InterruptedException {
-        RecordWriter<BytesWritable,Tuple> writer = storeFunc.getOutputFormat().getRecordWriter(ctx);
+        TaskAttemptContext ctx) throws IOException, InterruptedException {
+        RecordWriter<BytesWritable, Tuple> writer = storeFunc.getOutputFormat().getRecordWriter(ctx);
         String serializedJobInfo = ctx.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO);
-        OutputJobInfo outputJobInfo = (OutputJobInfo)HCatUtil.deserialize(serializedJobInfo);
+        OutputJobInfo outputJobInfo = (OutputJobInfo) HCatUtil.deserialize(serializedJobInfo);
         ResourceSchema rs = PigHCatUtil.getResourceSchema(outputJobInfo.getOutputSchema());
         String location = outputJobInfo.getLocation();
         return new StoreFuncBasedRecordWriter(writer, storeFunc, location, rs);
     }
-    
+
     static class StoreFuncBasedRecordWriter extends RecordWriter<BytesWritable, Tuple> {
-        private final RecordWriter<BytesWritable,Tuple> writer;
+        private final RecordWriter<BytesWritable, Tuple> writer;
         private final StoreFuncInterface storeFunc;
         private final ResourceSchema schema;
         private final String location;
-        
-        public StoreFuncBasedRecordWriter(RecordWriter<BytesWritable,Tuple> writer, StoreFuncInterface sf, String location, ResourceSchema rs) throws IOException {
+
+        public StoreFuncBasedRecordWriter(RecordWriter<BytesWritable, Tuple> writer, StoreFuncInterface sf, String location, ResourceSchema rs) throws IOException {
             this.writer = writer;
             this.storeFunc = sf;
             this.schema = rs;
             this.location = location;
             storeFunc.prepareToWrite(writer);
         }
-        
+
         @Override
         public void close(TaskAttemptContext ctx) throws IOException,
-                InterruptedException {
+            InterruptedException {
             writer.close(ctx);
         }
 
         @Override
         public void write(BytesWritable key, Tuple value) throws IOException,
-                InterruptedException {
+            InterruptedException {
             storeFunc.putNext(value);
         }
     }
-    
+
     static class StoreFuncBasedOutputCommitter extends OutputCommitter {
         StoreFuncInterface sf;
         OutputCommitter wrappedOutputCommitter;
         String location;
         ResourceSchema rs;
+
         public StoreFuncBasedOutputCommitter(StoreFuncInterface sf, OutputCommitter outputCommitter, String location, ResourceSchema rs) {
             this.sf = sf;
             this.wrappedOutputCommitter = outputCommitter;
             this.location = location;
             this.rs = rs;
         }
+
         @Override
         public void abortTask(TaskAttemptContext context) throws IOException {
             wrappedOutputCommitter.abortTask(context);
@@ -126,7 +128,7 @@
 
         @Override
         public boolean needsTaskCommit(TaskAttemptContext context)
-                throws IOException {
+            throws IOException {
             return wrappedOutputCommitter.needsTaskCommit(context);
         }
 
@@ -139,28 +141,28 @@
         public void setupTask(TaskAttemptContext context) throws IOException {
             wrappedOutputCommitter.setupTask(context);
         }
-        
+
         public void commitJob(JobContext context) throws IOException {
             wrappedOutputCommitter.commitJob(context);
             if (sf instanceof StoreMetadata) {
                 if (rs != null) {
                     ((StoreMetadata) sf).storeSchema(
-                            rs, location, new Job(context.getConfiguration()) );
+                        rs, location, new Job(context.getConfiguration()));
                 }
             }
         }
-        
+
         @Override
         public void cleanupJob(JobContext context) throws IOException {
             wrappedOutputCommitter.cleanupJob(context);
             if (sf instanceof StoreMetadata) {
                 if (rs != null) {
                     ((StoreMetadata) sf).storeSchema(
-                            rs, location, new Job(context.getConfiguration()) );
+                        rs, location, new Job(context.getConfiguration()));
                 }
             }
         }
-        
+
         public void abortJob(JobContext context, JobStatus.State state) throws IOException {
             wrappedOutputCommitter.abortJob(context, state);
         }
diff --git a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/MockLoader.java b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/MockLoader.java
index d95b1de..de69182 100644
--- a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/MockLoader.java
+++ b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/MockLoader.java
@@ -40,138 +40,140 @@
 import org.apache.pig.data.Tuple;
 
 public class MockLoader extends LoadFunc {
-  private static final class MockRecordReader extends RecordReader<Object, Object> {
-    @Override
-    public void close() throws IOException {
+    private static final class MockRecordReader extends RecordReader<Object, Object> {
+        @Override
+        public void close() throws IOException {
+        }
+
+        @Override
+        public Object getCurrentKey() throws IOException, InterruptedException {
+            return "mockKey";
+        }
+
+        @Override
+        public Object getCurrentValue() throws IOException, InterruptedException {
+            return "mockValue";
+        }
+
+        @Override
+        public float getProgress() throws IOException, InterruptedException {
+            return 0.5f;
+        }
+
+        @Override
+        public void initialize(InputSplit split, TaskAttemptContext arg1) throws IOException,
+            InterruptedException {
+        }
+
+        @Override
+        public boolean nextKeyValue() throws IOException, InterruptedException {
+            return true;
+        }
     }
 
-    @Override
-    public Object getCurrentKey() throws IOException, InterruptedException {
-      return "mockKey";
+    private static final class MockInputSplit extends InputSplit implements Writable {
+        private String location;
+
+        public MockInputSplit() {
+        }
+
+        public MockInputSplit(String location) {
+            this.location = location;
+        }
+
+        @Override
+        public String[] getLocations() throws IOException, InterruptedException {
+            return new String[]{location};
+        }
+
+        @Override
+        public long getLength() throws IOException, InterruptedException {
+            return 10000000;
+        }
+
+        @Override
+        public boolean equals(Object arg0) {
+            return arg0 == this;
+        }
+
+        @Override
+        public int hashCode() {
+            return location.hashCode();
+        }
+
+        @Override
+        public void readFields(DataInput arg0) throws IOException {
+            location = arg0.readUTF();
+        }
+
+        @Override
+        public void write(DataOutput arg0) throws IOException {
+            arg0.writeUTF(location);
+        }
     }
 
-    @Override
-    public Object getCurrentValue() throws IOException, InterruptedException {
-      return "mockValue";
+    private static final class MockInputFormat extends InputFormat {
+
+        private final String location;
+
+        public MockInputFormat(String location) {
+            this.location = location;
+        }
+
+        @Override
+        public RecordReader createRecordReader(InputSplit arg0, TaskAttemptContext arg1)
+            throws IOException, InterruptedException {
+            return new MockRecordReader();
+        }
+
+        @Override
+        public List getSplits(JobContext arg0) throws IOException, InterruptedException {
+            return Arrays.asList(new MockInputSplit(location));
+        }
     }
 
-    @Override
-    public float getProgress() throws IOException, InterruptedException {
-      return 0.5f;
+    private static final Map<String, Iterable<Tuple>> locationToData = new HashMap<String, Iterable<Tuple>>();
+
+    public static void setData(String location, Iterable<Tuple> data) {
+        locationToData.put(location, data);
     }
 
-    @Override
-    public void initialize(InputSplit split, TaskAttemptContext arg1) throws IOException,
-        InterruptedException {
-    }
-
-    @Override
-    public boolean nextKeyValue() throws IOException, InterruptedException {
-      return true;
-    }
-  }
-
-  private static final class MockInputSplit extends InputSplit implements Writable  {
     private String location;
-    public MockInputSplit() {
-    }
-    public MockInputSplit(String location) {
-      this.location = location;
+
+    private Iterator<Tuple> data;
+
+    @Override
+    public String relativeToAbsolutePath(String location, Path curDir) throws IOException {
+        return location;
     }
 
     @Override
-    public String[] getLocations() throws IOException, InterruptedException {
-      return new String[] { location };
+    public void setLocation(String location, Job job) throws IOException {
+        this.location = location;
+        if (location == null) {
+            throw new IOException("null location passed to MockLoader");
+        }
+        this.data = locationToData.get(location).iterator();
+        if (this.data == null) {
+            throw new IOException("No data configured for location: " + location);
+        }
     }
 
     @Override
-    public long getLength() throws IOException, InterruptedException {
-      return 10000000;
+    public Tuple getNext() throws IOException {
+        if (data == null) {
+            throw new IOException("data was not correctly initialized in MockLoader");
+        }
+        return data.hasNext() ? data.next() : null;
     }
 
     @Override
-    public boolean equals(Object arg0) {
-      return arg0==this;
+    public InputFormat getInputFormat() throws IOException {
+        return new MockInputFormat(location);
     }
 
     @Override
-    public int hashCode() {
-      return location.hashCode();
+    public void prepareToRead(RecordReader arg0, PigSplit arg1) throws IOException {
     }
 
-    @Override
-    public void readFields(DataInput arg0) throws IOException {
-      location = arg0.readUTF();
-    }
-
-    @Override
-    public void write(DataOutput arg0) throws IOException {
-      arg0.writeUTF(location);
-    }
-  }
-
-  private static final class MockInputFormat extends InputFormat {
-
-    private final String location;
-
-    public MockInputFormat(String location) {
-      this.location = location;
-    }
-
-    @Override
-    public RecordReader createRecordReader(InputSplit arg0, TaskAttemptContext arg1)
-        throws IOException, InterruptedException {
-      return new MockRecordReader();
-    }
-
-    @Override
-    public List getSplits(JobContext arg0) throws IOException, InterruptedException {
-      return Arrays.asList(new MockInputSplit(location));
-    }
-  }
-
-  private static final Map<String, Iterable<Tuple>> locationToData = new HashMap<String, Iterable<Tuple>>();
-
-  public static void setData(String location, Iterable<Tuple> data) {
-    locationToData.put(location, data);
-  }
-
-  private String location;
-
-  private Iterator<Tuple> data;
-
-  @Override
-  public String relativeToAbsolutePath(String location, Path curDir) throws IOException {
-    return location;
-  }
-
-  @Override
-  public void setLocation(String location, Job job) throws IOException {
-    this.location = location;
-    if (location == null) {
-      throw new IOException("null location passed to MockLoader");
-    }
-    this.data = locationToData.get(location).iterator();
-    if (this.data == null) {
-      throw new IOException("No data configured for location: "+location);
-    }
-  }
-
-  @Override
-  public Tuple getNext() throws IOException {
-    if (data == null) {
-      throw new IOException("data was not correctly initialized in MockLoader");
-    }
-    return data.hasNext() ? data.next() : null;
-  }
-
-  @Override
-  public InputFormat getInputFormat() throws IOException {
-    return new MockInputFormat(location);
-  }
-
-  @Override
-  public void prepareToRead(RecordReader arg0, PigSplit arg1) throws IOException {
-  }
-
 }
diff --git a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/MyPigStorage.java b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/MyPigStorage.java
index 74e69c4..a37dc1c 100644
--- a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/MyPigStorage.java
+++ b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/MyPigStorage.java
@@ -24,15 +24,16 @@
 
 public class MyPigStorage extends PigStorage {
 
-  String arg2;
-  public MyPigStorage(String arg1, String arg2) throws IOException {
-    super(arg1);
-    this.arg2 = arg2;
-  }
-  
-  @Override
-  public void putNext(Tuple t) throws IOException {
-      t.append(arg2);
-      super.putNext(t);
-  }
+    String arg2;
+
+    public MyPigStorage(String arg1, String arg2) throws IOException {
+        super(arg1);
+        this.arg2 = arg2;
+    }
+
+    @Override
+    public void putNext(Tuple t) throws IOException {
+        t.append(arg2);
+        super.putNext(t);
+    }
 }
diff --git a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoader.java b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoader.java
index 2a2eb25..da6d13f 100644
--- a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoader.java
+++ b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoader.java
@@ -49,393 +49,395 @@
 import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
 
 public class TestHCatLoader extends TestCase {
-  private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
-      "/build/test/data/" + TestHCatLoader.class.getCanonicalName();
-  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
-  private static final String BASIC_FILE_NAME = TEST_DATA_DIR + "/basic.input.data";
-  private static final String COMPLEX_FILE_NAME = TEST_DATA_DIR + "/complex.input.data";
+    private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+        "/build/test/data/" + TestHCatLoader.class.getCanonicalName();
+    private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+    private static final String BASIC_FILE_NAME = TEST_DATA_DIR + "/basic.input.data";
+    private static final String COMPLEX_FILE_NAME = TEST_DATA_DIR + "/complex.input.data";
 
-  private static final String BASIC_TABLE = "junit_unparted_basic";
-  private static final String COMPLEX_TABLE = "junit_unparted_complex";
-  private static final String PARTITIONED_TABLE = "junit_parted_basic";
-  private static final String SPECIFIC_SIZE_TABLE = "junit_specific_size";
-  private static Driver driver;
+    private static final String BASIC_TABLE = "junit_unparted_basic";
+    private static final String COMPLEX_TABLE = "junit_unparted_complex";
+    private static final String PARTITIONED_TABLE = "junit_parted_basic";
+    private static final String SPECIFIC_SIZE_TABLE = "junit_specific_size";
+    private static Driver driver;
 
-  private static int guardTestCount = 6; // ugh, instantiate using introspection in guardedSetupBeforeClass
-  private static boolean setupHasRun = false;
+    private static int guardTestCount = 6; // ugh, instantiate using introspection in guardedSetupBeforeClass
+    private static boolean setupHasRun = false;
 
-  private static Map<Integer,Pair<Integer,String>> basicInputData;
+    private static Map<Integer, Pair<Integer, String>> basicInputData;
 
-  private void dropTable(String tablename) throws IOException, CommandNeedRetryException{
-    driver.run("drop table "+tablename);
-  }
-  private void createTable(String tablename, String schema, String partitionedBy) throws IOException, CommandNeedRetryException{
-    String createTable;
-    createTable = "create table "+tablename+"("+schema+") ";
-    if ((partitionedBy != null)&&(!partitionedBy.trim().isEmpty())){
-      createTable = createTable + "partitioned by ("+partitionedBy+") ";
-    }
-    createTable = createTable + "stored as RCFILE tblproperties('hcat.isd'='org.apache.hcatalog.rcfile.RCFileInputDriver'," +
-        "'hcat.osd'='org.apache.hcatalog.rcfile.RCFileOutputDriver') ";
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table. ["+createTable+"], return code from hive driver : ["+retCode+"]");
-    }
-  }
-
-  private void createTable(String tablename, String schema) throws IOException, CommandNeedRetryException{
-    createTable(tablename,schema,null);
-  }
-
-  protected void guardedSetUpBeforeClass() throws Exception {
-    if (!setupHasRun){
-      setupHasRun = true;
-    }else{
-      return;
+    private void dropTable(String tablename) throws IOException, CommandNeedRetryException {
+        driver.run("drop table " + tablename);
     }
 
-    File f = new File(TEST_WAREHOUSE_DIR);
-    if (f.exists()) {
-      FileUtil.fullyDelete(f);
-    }
-    new File(TEST_WAREHOUSE_DIR).mkdirs();
-
-    HiveConf hiveConf = new HiveConf(this.getClass());
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
-    driver = new Driver(hiveConf);
-    SessionState.start(new CliSessionState(hiveConf));
-
-    cleanup();
-
-    createTable(BASIC_TABLE,"a int, b string");
-    createTable(COMPLEX_TABLE,
-        "name string, studentid int, "
-        + "contact struct<phno:string,email:string>, "
-        + "currently_registered_courses array<string>, "
-        + "current_grades map<string,string>, "
-        + "phnos array<struct<phno:string,type:string>>");
-
-    createTable(PARTITIONED_TABLE,"a int, b string","bkt string");
-    createTable(SPECIFIC_SIZE_TABLE, "a int, b string");
-
-    int LOOP_SIZE = 3;
-    String[] input = new String[LOOP_SIZE*LOOP_SIZE];
-    basicInputData = new HashMap<Integer,Pair<Integer,String>>();
-    int k = 0;
-    for(int i = 1; i <= LOOP_SIZE; i++) {
-      String si = i + "";
-      for(int j=1;j<=LOOP_SIZE;j++) {
-        String sj = "S"+j+"S";
-        input[k] = si + "\t" + sj;
-        basicInputData.put(k, new Pair<Integer,String>(i,sj));
-        k++;
-      }
-    }
-    HcatTestUtils.createTestDataFile(BASIC_FILE_NAME, input);
-    HcatTestUtils.createTestDataFile(COMPLEX_FILE_NAME,
-        new String[]{
-            //"Henry Jekyll\t42\t(415-253-6367,hjekyll@contemporary.edu.uk)\t{(PHARMACOLOGY),(PSYCHIATRY)},[PHARMACOLOGY#A-,PSYCHIATRY#B+],{(415-253-6367,cell),(408-253-6367,landline)}",
-            //"Edward Hyde\t1337\t(415-253-6367,anonymous@b44chan.org)\t{(CREATIVE_WRITING),(COPYRIGHT_LAW)},[CREATIVE_WRITING#A+,COPYRIGHT_LAW#D],{(415-253-6367,cell),(408-253-6367,landline)}",
+    private void createTable(String tablename, String schema, String partitionedBy) throws IOException, CommandNeedRetryException {
+        String createTable;
+        createTable = "create table " + tablename + "(" + schema + ") ";
+        if ((partitionedBy != null) && (!partitionedBy.trim().isEmpty())) {
+            createTable = createTable + "partitioned by (" + partitionedBy + ") ";
         }
-    );
-
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.setBatchOn();
-    server.registerQuery("A = load '"+BASIC_FILE_NAME+"' as (a:int, b:chararray);");
-
-    server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.hcatalog.pig.HCatStorer();");
-    server.registerQuery("store A into '" + SPECIFIC_SIZE_TABLE + "' using org.apache.hcatalog.pig.HCatStorer();");
-    server.registerQuery("B = foreach A generate a,b;");
-    server.registerQuery("B2 = filter B by a < 2;");
-    server.registerQuery("store B2 into '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatStorer('bkt=0');");
-
-    server.registerQuery("C = foreach A generate a,b;");
-    server.registerQuery("C2 = filter C by a >= 2;");
-    server.registerQuery("store C2 into '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatStorer('bkt=1');");
-
-    server.registerQuery("D = load '"+COMPLEX_FILE_NAME+"' as (name:chararray, studentid:int, contact:tuple(phno:chararray,email:chararray), currently_registered_courses:bag{innertup:tuple(course:chararray)}, current_grades:map[ ] , phnos :bag{innertup:tuple(phno:chararray,type:chararray)});");
-    server.registerQuery("store D into '"+COMPLEX_TABLE+"' using org.apache.hcatalog.pig.HCatStorer();");
-    server.executeBatch();
-
-  }
-  private void cleanup() throws IOException, CommandNeedRetryException {
-    dropTable(BASIC_TABLE);
-    dropTable(COMPLEX_TABLE);
-    dropTable(PARTITIONED_TABLE);
-    dropTable(SPECIFIC_SIZE_TABLE);
-  }
-
-  protected void guardedTearDownAfterClass() throws Exception {
-    guardTestCount--;
-    if (guardTestCount > 0){
-      return;
-    }
-    cleanup();
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-    guardedSetUpBeforeClass();
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    guardedTearDownAfterClass();
-  }
-
-  public void testSchemaLoadBasic() throws IOException{
-
-    PigServer server = new PigServer(ExecType.LOCAL);
-
-    // test that schema was loaded correctly
-    server.registerQuery("X = load '"+BASIC_TABLE+"' using org.apache.hcatalog.pig.HCatLoader();");
-    Schema dumpedXSchema = server.dumpSchema("X");
-    List<FieldSchema> Xfields = dumpedXSchema.getFields();
-    assertEquals(2,Xfields.size());
-    assertTrue(Xfields.get(0).alias.equalsIgnoreCase("a"));
-    assertTrue(Xfields.get(0).type == DataType.INTEGER);
-    assertTrue(Xfields.get(1).alias.equalsIgnoreCase("b"));
-    assertTrue(Xfields.get(1).type == DataType.CHARARRAY);
-
-  }
-
-  public void testReadDataBasic() throws IOException {
-    PigServer server = new PigServer(ExecType.LOCAL);
-
-    server.registerQuery("X = load '"+BASIC_TABLE+"' using org.apache.hcatalog.pig.HCatLoader();");
-    Iterator<Tuple> XIter = server.openIterator("X");
-    int numTuplesRead = 0;
-    while( XIter.hasNext() ){
-      Tuple t = XIter.next();
-      assertEquals(2,t.size());
-      assertTrue(t.get(0).getClass() == Integer.class);
-      assertTrue(t.get(1).getClass() == String.class);
-      assertEquals(t.get(0),basicInputData.get(numTuplesRead).first);
-      assertEquals(t.get(1),basicInputData.get(numTuplesRead).second);
-      numTuplesRead++;
-    }
-    assertEquals(basicInputData.size(),numTuplesRead);
-  }
-
-  public void testSchemaLoadComplex() throws IOException{
-
-    PigServer server = new PigServer(ExecType.LOCAL);
-
-    // test that schema was loaded correctly
-    server.registerQuery("K = load '"+COMPLEX_TABLE+"' using org.apache.hcatalog.pig.HCatLoader();");
-    Schema dumpedKSchema = server.dumpSchema("K");
-    List<FieldSchema> Kfields = dumpedKSchema.getFields();
-    assertEquals(6, Kfields.size());
-
-    assertEquals(DataType.CHARARRAY,Kfields.get(0).type);
-    assertEquals("name",Kfields.get(0).alias.toLowerCase());
-
-    assertEquals( DataType.INTEGER,Kfields.get(1).type);
-    assertEquals("studentid", Kfields.get(1).alias.toLowerCase());
-
-    assertEquals(DataType.TUPLE, Kfields.get(2).type);
-    assertEquals("contact", Kfields.get(2).alias.toLowerCase());
-    {
-      assertNotNull(Kfields.get(2).schema);
-      assertTrue(Kfields.get(2).schema.getFields().size() == 2);
-      assertTrue(Kfields.get(2).schema.getFields().get(0).type == DataType.CHARARRAY);
-      assertTrue(Kfields.get(2).schema.getFields().get(0).alias.equalsIgnoreCase("phno"));
-      assertTrue(Kfields.get(2).schema.getFields().get(1).type == DataType.CHARARRAY);
-      assertTrue(Kfields.get(2).schema.getFields().get(1).alias.equalsIgnoreCase("email"));
-    }
-    assertEquals(DataType.BAG, Kfields.get(3).type);
-    assertEquals("currently_registered_courses", Kfields.get(3).alias.toLowerCase());
-    {
-      assertNotNull(Kfields.get(3).schema);
-      assertEquals(1,Kfields.get(3).schema.getFields().size());
-      assertEquals(DataType.TUPLE,Kfields.get(3).schema.getFields().get(0).type);
-      assertNotNull(Kfields.get(3).schema.getFields().get(0).schema);
-      assertEquals(1,Kfields.get(3).schema.getFields().get(0).schema.getFields().size());
-      assertEquals(DataType.CHARARRAY,Kfields.get(3).schema.getFields().get(0).schema.getFields().get(0).type);
-      // assertEquals("course",Kfields.get(3).schema.getFields().get(0).schema.getFields().get(0).alias.toLowerCase());
-      // commented out, because the name becomes "innerfield" by default - we call it "course" in pig,
-      // but in the metadata, it'd be anonymous, so this would be autogenerated, which is fine
-    }
-    assertEquals(DataType.MAP,Kfields.get(4).type);
-    assertEquals("current_grades",Kfields.get(4).alias.toLowerCase());
-    assertEquals(DataType.BAG,Kfields.get(5).type);
-    assertEquals("phnos", Kfields.get(5).alias.toLowerCase());
-    {
-      assertNotNull(Kfields.get(5).schema);
-      assertEquals(1,Kfields.get(5).schema.getFields().size());
-      assertEquals(DataType.TUPLE,Kfields.get(5).schema.getFields().get(0).type);
-      assertNotNull(Kfields.get(5).schema.getFields().get(0).schema);
-      assertTrue(Kfields.get(5).schema.getFields().get(0).schema.getFields().size() == 2);
-      assertEquals(DataType.CHARARRAY,Kfields.get(5).schema.getFields().get(0).schema.getFields().get(0).type);
-      assertEquals("phno",Kfields.get(5).schema.getFields().get(0).schema.getFields().get(0).alias.toLowerCase());
-      assertEquals(DataType.CHARARRAY,Kfields.get(5).schema.getFields().get(0).schema.getFields().get(1).type);
-      assertEquals("type",Kfields.get(5).schema.getFields().get(0).schema.getFields().get(1).alias.toLowerCase());
+        createTable = createTable + "stored as RCFILE tblproperties('hcat.isd'='org.apache.hcatalog.rcfile.RCFileInputDriver'," +
+            "'hcat.osd'='org.apache.hcatalog.rcfile.RCFileOutputDriver') ";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table. [" + createTable + "], return code from hive driver : [" + retCode + "]");
+        }
     }
 
-  }
-
-  public void testReadPartitionedBasic() throws IOException, CommandNeedRetryException {
-    PigServer server = new PigServer(ExecType.LOCAL);
-
-    driver.run("select * from "+PARTITIONED_TABLE);
-    ArrayList<String> valuesReadFromHiveDriver = new ArrayList<String>();
-    driver.getResults(valuesReadFromHiveDriver);
-    assertEquals(basicInputData.size(), valuesReadFromHiveDriver.size());
-
-    server.registerQuery("W = load '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatLoader();");
-    Schema dumpedWSchema = server.dumpSchema("W");
-    List<FieldSchema> Wfields = dumpedWSchema.getFields();
-    assertEquals(3,Wfields.size());
-    assertTrue(Wfields.get(0).alias.equalsIgnoreCase("a"));
-    assertTrue(Wfields.get(0).type == DataType.INTEGER);
-    assertTrue(Wfields.get(1).alias.equalsIgnoreCase("b"));
-    assertTrue(Wfields.get(1).type == DataType.CHARARRAY);
-    assertTrue(Wfields.get(2).alias.equalsIgnoreCase("bkt"));
-    assertTrue(Wfields.get(2).type == DataType.CHARARRAY);
-
-    Iterator<Tuple> WIter = server.openIterator("W");
-    Collection<Pair<Integer,String>> valuesRead = new ArrayList<Pair<Integer,String>>();
-    while( WIter.hasNext() ){
-      Tuple t = WIter.next();
-      assertTrue(t.size() == 3);
-      assertTrue(t.get(0).getClass() == Integer.class);
-      assertTrue(t.get(1).getClass() == String.class);
-      assertTrue(t.get(2).getClass() == String.class);
-      valuesRead.add(new Pair<Integer,String>((Integer)t.get(0),(String)t.get(1)));
-      if ((Integer)t.get(0) < 2){
-        assertEquals("0",t.get(2));
-      }else{
-        assertEquals("1",t.get(2));
-      }
+    private void createTable(String tablename, String schema) throws IOException, CommandNeedRetryException {
+        createTable(tablename, schema, null);
     }
-    assertEquals(valuesReadFromHiveDriver.size(),valuesRead.size());
 
-    server.registerQuery("P1 = load '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatLoader();");
-    server.registerQuery("P1filter = filter P1 by bkt == '0';");
-    Iterator<Tuple> P1Iter = server.openIterator("P1filter");
-    int count1 = 0;
-    while( P1Iter.hasNext() ) {
-      Tuple t = P1Iter.next();
+    protected void guardedSetUpBeforeClass() throws Exception {
+        if (!setupHasRun) {
+            setupHasRun = true;
+        } else {
+            return;
+        }
 
-      assertEquals("0", t.get(2));
-      assertEquals(1, t.get(0));
-      count1++;
+        File f = new File(TEST_WAREHOUSE_DIR);
+        if (f.exists()) {
+            FileUtil.fullyDelete(f);
+        }
+        new File(TEST_WAREHOUSE_DIR).mkdirs();
+
+        HiveConf hiveConf = new HiveConf(this.getClass());
+        hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+        hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
+        driver = new Driver(hiveConf);
+        SessionState.start(new CliSessionState(hiveConf));
+
+        cleanup();
+
+        createTable(BASIC_TABLE, "a int, b string");
+        createTable(COMPLEX_TABLE,
+            "name string, studentid int, "
+                + "contact struct<phno:string,email:string>, "
+                + "currently_registered_courses array<string>, "
+                + "current_grades map<string,string>, "
+                + "phnos array<struct<phno:string,type:string>>");
+
+        createTable(PARTITIONED_TABLE, "a int, b string", "bkt string");
+        createTable(SPECIFIC_SIZE_TABLE, "a int, b string");
+
+        int LOOP_SIZE = 3;
+        String[] input = new String[LOOP_SIZE * LOOP_SIZE];
+        basicInputData = new HashMap<Integer, Pair<Integer, String>>();
+        int k = 0;
+        for (int i = 1; i <= LOOP_SIZE; i++) {
+            String si = i + "";
+            for (int j = 1; j <= LOOP_SIZE; j++) {
+                String sj = "S" + j + "S";
+                input[k] = si + "\t" + sj;
+                basicInputData.put(k, new Pair<Integer, String>(i, sj));
+                k++;
+            }
+        }
+        HcatTestUtils.createTestDataFile(BASIC_FILE_NAME, input);
+        HcatTestUtils.createTestDataFile(COMPLEX_FILE_NAME,
+            new String[]{
+                //"Henry Jekyll\t42\t(415-253-6367,hjekyll@contemporary.edu.uk)\t{(PHARMACOLOGY),(PSYCHIATRY)},[PHARMACOLOGY#A-,PSYCHIATRY#B+],{(415-253-6367,cell),(408-253-6367,landline)}",
+                //"Edward Hyde\t1337\t(415-253-6367,anonymous@b44chan.org)\t{(CREATIVE_WRITING),(COPYRIGHT_LAW)},[CREATIVE_WRITING#A+,COPYRIGHT_LAW#D],{(415-253-6367,cell),(408-253-6367,landline)}",
+            }
+        );
+
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("A = load '" + BASIC_FILE_NAME + "' as (a:int, b:chararray);");
+
+        server.registerQuery("store A into '" + BASIC_TABLE + "' using org.apache.hcatalog.pig.HCatStorer();");
+        server.registerQuery("store A into '" + SPECIFIC_SIZE_TABLE + "' using org.apache.hcatalog.pig.HCatStorer();");
+        server.registerQuery("B = foreach A generate a,b;");
+        server.registerQuery("B2 = filter B by a < 2;");
+        server.registerQuery("store B2 into '" + PARTITIONED_TABLE + "' using org.apache.hcatalog.pig.HCatStorer('bkt=0');");
+
+        server.registerQuery("C = foreach A generate a,b;");
+        server.registerQuery("C2 = filter C by a >= 2;");
+        server.registerQuery("store C2 into '" + PARTITIONED_TABLE + "' using org.apache.hcatalog.pig.HCatStorer('bkt=1');");
+
+        server.registerQuery("D = load '" + COMPLEX_FILE_NAME + "' as (name:chararray, studentid:int, contact:tuple(phno:chararray,email:chararray), currently_registered_courses:bag{innertup:tuple(course:chararray)}, current_grades:map[ ] , phnos :bag{innertup:tuple(phno:chararray,type:chararray)});");
+        server.registerQuery("store D into '" + COMPLEX_TABLE + "' using org.apache.hcatalog.pig.HCatStorer();");
+        server.executeBatch();
+
     }
-    assertEquals(3, count1);
 
-    server.registerQuery("P2 = load '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatLoader();");
-    server.registerQuery("P2filter = filter P2 by bkt == '1';");
-    Iterator<Tuple> P2Iter = server.openIterator("P2filter");
-    int count2 = 0;
-    while( P2Iter.hasNext() ) {
-      Tuple t = P2Iter.next();
-
-      assertEquals("1", t.get(2));
-      assertTrue(((Integer) t.get(0)) > 1);
-      count2++;
+    private void cleanup() throws IOException, CommandNeedRetryException {
+        dropTable(BASIC_TABLE);
+        dropTable(COMPLEX_TABLE);
+        dropTable(PARTITIONED_TABLE);
+        dropTable(SPECIFIC_SIZE_TABLE);
     }
-    assertEquals(6, count2);
-  }
 
-  public void testProjectionsBasic() throws IOException {
-
-    PigServer server = new PigServer(ExecType.LOCAL);
-
-    // projections are handled by using generate, not "as" on the Load
-
-    server.registerQuery("Y1 = load '"+BASIC_TABLE+"' using org.apache.hcatalog.pig.HCatLoader();");
-    server.registerQuery("Y2 = foreach Y1 generate a;");
-    server.registerQuery("Y3 = foreach Y1 generate b,a;");
-    Schema dumpedY2Schema = server.dumpSchema("Y2");
-    Schema dumpedY3Schema = server.dumpSchema("Y3");
-    List<FieldSchema> Y2fields = dumpedY2Schema.getFields();
-    List<FieldSchema> Y3fields = dumpedY3Schema.getFields();
-    assertEquals(1,Y2fields.size());
-    assertEquals("a",Y2fields.get(0).alias.toLowerCase());
-    assertEquals(DataType.INTEGER,Y2fields.get(0).type);
-    assertEquals(2,Y3fields.size());
-    assertEquals("b",Y3fields.get(0).alias.toLowerCase());
-    assertEquals(DataType.CHARARRAY,Y3fields.get(0).type);
-    assertEquals("a",Y3fields.get(1).alias.toLowerCase());
-    assertEquals(DataType.INTEGER,Y3fields.get(1).type);
-
-    int numTuplesRead = 0;
-    Iterator<Tuple> Y2Iter = server.openIterator("Y2");
-    while( Y2Iter.hasNext() ){
-      Tuple t = Y2Iter.next();
-      assertEquals(t.size(),1);
-      assertTrue(t.get(0).getClass() == Integer.class);
-      assertEquals(t.get(0),basicInputData.get(numTuplesRead).first);
-      numTuplesRead++;
+    protected void guardedTearDownAfterClass() throws Exception {
+        guardTestCount--;
+        if (guardTestCount > 0) {
+            return;
+        }
+        cleanup();
     }
-    numTuplesRead = 0;
-    Iterator<Tuple> Y3Iter = server.openIterator("Y3");
-    while( Y3Iter.hasNext() ){
-      Tuple t = Y3Iter.next();
-      assertEquals(t.size(),2);
-      assertTrue(t.get(0).getClass() == String.class);
-      assertEquals(t.get(0),basicInputData.get(numTuplesRead).second);
-      assertTrue(t.get(1).getClass() == Integer.class);
-      assertEquals(t.get(1),basicInputData.get(numTuplesRead).first);
-      numTuplesRead++;
+
+    @Override
+    protected void setUp() throws Exception {
+        guardedSetUpBeforeClass();
     }
-    assertEquals(basicInputData.size(),numTuplesRead);
-  }
 
-  public void testGetInputBytes() throws Exception {
-    File file = new File(TEST_WAREHOUSE_DIR + "/" + SPECIFIC_SIZE_TABLE + "/part-m-00000");
-    file.deleteOnExit();
-    RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw");
-    randomAccessFile.setLength(2L * 1024 * 1024 * 1024);
+    @Override
+    protected void tearDown() throws Exception {
+        guardedTearDownAfterClass();
+    }
 
-    Job job = new Job();
-    HCatLoader hCatLoader = new HCatLoader();
-    hCatLoader.setUDFContextSignature(this.getName());
-    hCatLoader.setLocation(SPECIFIC_SIZE_TABLE, job);
-    ResourceStatistics statistics = hCatLoader.getStatistics(file.getAbsolutePath(), job);
-    assertEquals(2048, (long) statistics.getmBytes());
-  }
+    public void testSchemaLoadBasic() throws IOException {
 
-  public void testConvertBooleanToInt() throws Exception {
-    String tbl = "test_convert_boolean_to_int";
-    String inputFileName = TEST_DATA_DIR + "/testConvertBooleanToInt/data.txt";
-    File inputDataDir = new File(inputFileName).getParentFile();
-    inputDataDir.mkdir();
+        PigServer server = new PigServer(ExecType.LOCAL);
 
-    String[] lines = new String[] {"llama\t1", "alpaca\t0"};
-    HcatTestUtils.createTestDataFile(inputFileName, lines);
+        // test that schema was loaded correctly
+        server.registerQuery("X = load '" + BASIC_TABLE + "' using org.apache.hcatalog.pig.HCatLoader();");
+        Schema dumpedXSchema = server.dumpSchema("X");
+        List<FieldSchema> Xfields = dumpedXSchema.getFields();
+        assertEquals(2, Xfields.size());
+        assertTrue(Xfields.get(0).alias.equalsIgnoreCase("a"));
+        assertTrue(Xfields.get(0).type == DataType.INTEGER);
+        assertTrue(Xfields.get(1).alias.equalsIgnoreCase("b"));
+        assertTrue(Xfields.get(1).type == DataType.CHARARRAY);
 
-    assertEquals(0, driver.run("drop table if exists " + tbl).getResponseCode());
-    assertEquals(0, driver.run("create external table " + tbl +
-        " (a string, b boolean) row format delimited fields terminated by '\t'" +
-        " stored as textfile location 'file://" +
-        inputDataDir.getAbsolutePath() + "'").getResponseCode());
+    }
 
-    Properties properties = new Properties();
-    properties.setProperty(HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER, "true");
-    PigServer server = new PigServer(ExecType.LOCAL, properties);
-    server.registerQuery(
-        "data = load 'test_convert_boolean_to_int' using org.apache.hcatalog.pig.HCatLoader();");
-    Schema schema = server.dumpSchema("data");
-    assertEquals(2, schema.getFields().size());
+    public void testReadDataBasic() throws IOException {
+        PigServer server = new PigServer(ExecType.LOCAL);
 
-    assertEquals("a", schema.getField(0).alias);
-    assertEquals(DataType.CHARARRAY, schema.getField(0).type);
-    assertEquals("b", schema.getField(1).alias);
-    assertEquals(DataType.INTEGER, schema.getField(1).type);
+        server.registerQuery("X = load '" + BASIC_TABLE + "' using org.apache.hcatalog.pig.HCatLoader();");
+        Iterator<Tuple> XIter = server.openIterator("X");
+        int numTuplesRead = 0;
+        while (XIter.hasNext()) {
+            Tuple t = XIter.next();
+            assertEquals(2, t.size());
+            assertTrue(t.get(0).getClass() == Integer.class);
+            assertTrue(t.get(1).getClass() == String.class);
+            assertEquals(t.get(0), basicInputData.get(numTuplesRead).first);
+            assertEquals(t.get(1), basicInputData.get(numTuplesRead).second);
+            numTuplesRead++;
+        }
+        assertEquals(basicInputData.size(), numTuplesRead);
+    }
 
-    Iterator<Tuple> iterator = server.openIterator("data");
-    Tuple t = iterator.next();
-    assertEquals("llama", t.get(0));
-    // TODO: Figure out how to load a text file into Hive with boolean columns. This next assert
-    // passes because data was loaded as integers, not because it was converted.
-    assertEquals(1, t.get(1));
-    t = iterator.next();
-    assertEquals("alpaca", t.get(0));
-    assertEquals(0, t.get(1));
-    assertFalse(iterator.hasNext());
-  }
+    public void testSchemaLoadComplex() throws IOException {
+
+        PigServer server = new PigServer(ExecType.LOCAL);
+
+        // test that schema was loaded correctly
+        server.registerQuery("K = load '" + COMPLEX_TABLE + "' using org.apache.hcatalog.pig.HCatLoader();");
+        Schema dumpedKSchema = server.dumpSchema("K");
+        List<FieldSchema> Kfields = dumpedKSchema.getFields();
+        assertEquals(6, Kfields.size());
+
+        assertEquals(DataType.CHARARRAY, Kfields.get(0).type);
+        assertEquals("name", Kfields.get(0).alias.toLowerCase());
+
+        assertEquals(DataType.INTEGER, Kfields.get(1).type);
+        assertEquals("studentid", Kfields.get(1).alias.toLowerCase());
+
+        assertEquals(DataType.TUPLE, Kfields.get(2).type);
+        assertEquals("contact", Kfields.get(2).alias.toLowerCase());
+        {
+            assertNotNull(Kfields.get(2).schema);
+            assertTrue(Kfields.get(2).schema.getFields().size() == 2);
+            assertTrue(Kfields.get(2).schema.getFields().get(0).type == DataType.CHARARRAY);
+            assertTrue(Kfields.get(2).schema.getFields().get(0).alias.equalsIgnoreCase("phno"));
+            assertTrue(Kfields.get(2).schema.getFields().get(1).type == DataType.CHARARRAY);
+            assertTrue(Kfields.get(2).schema.getFields().get(1).alias.equalsIgnoreCase("email"));
+        }
+        assertEquals(DataType.BAG, Kfields.get(3).type);
+        assertEquals("currently_registered_courses", Kfields.get(3).alias.toLowerCase());
+        {
+            assertNotNull(Kfields.get(3).schema);
+            assertEquals(1, Kfields.get(3).schema.getFields().size());
+            assertEquals(DataType.TUPLE, Kfields.get(3).schema.getFields().get(0).type);
+            assertNotNull(Kfields.get(3).schema.getFields().get(0).schema);
+            assertEquals(1, Kfields.get(3).schema.getFields().get(0).schema.getFields().size());
+            assertEquals(DataType.CHARARRAY, Kfields.get(3).schema.getFields().get(0).schema.getFields().get(0).type);
+            // assertEquals("course",Kfields.get(3).schema.getFields().get(0).schema.getFields().get(0).alias.toLowerCase());
+            // commented out, because the name becomes "innerfield" by default - we call it "course" in pig,
+            // but in the metadata, it'd be anonymous, so this would be autogenerated, which is fine
+        }
+        assertEquals(DataType.MAP, Kfields.get(4).type);
+        assertEquals("current_grades", Kfields.get(4).alias.toLowerCase());
+        assertEquals(DataType.BAG, Kfields.get(5).type);
+        assertEquals("phnos", Kfields.get(5).alias.toLowerCase());
+        {
+            assertNotNull(Kfields.get(5).schema);
+            assertEquals(1, Kfields.get(5).schema.getFields().size());
+            assertEquals(DataType.TUPLE, Kfields.get(5).schema.getFields().get(0).type);
+            assertNotNull(Kfields.get(5).schema.getFields().get(0).schema);
+            assertTrue(Kfields.get(5).schema.getFields().get(0).schema.getFields().size() == 2);
+            assertEquals(DataType.CHARARRAY, Kfields.get(5).schema.getFields().get(0).schema.getFields().get(0).type);
+            assertEquals("phno", Kfields.get(5).schema.getFields().get(0).schema.getFields().get(0).alias.toLowerCase());
+            assertEquals(DataType.CHARARRAY, Kfields.get(5).schema.getFields().get(0).schema.getFields().get(1).type);
+            assertEquals("type", Kfields.get(5).schema.getFields().get(0).schema.getFields().get(1).alias.toLowerCase());
+        }
+
+    }
+
+    public void testReadPartitionedBasic() throws IOException, CommandNeedRetryException {
+        PigServer server = new PigServer(ExecType.LOCAL);
+
+        driver.run("select * from " + PARTITIONED_TABLE);
+        ArrayList<String> valuesReadFromHiveDriver = new ArrayList<String>();
+        driver.getResults(valuesReadFromHiveDriver);
+        assertEquals(basicInputData.size(), valuesReadFromHiveDriver.size());
+
+        server.registerQuery("W = load '" + PARTITIONED_TABLE + "' using org.apache.hcatalog.pig.HCatLoader();");
+        Schema dumpedWSchema = server.dumpSchema("W");
+        List<FieldSchema> Wfields = dumpedWSchema.getFields();
+        assertEquals(3, Wfields.size());
+        assertTrue(Wfields.get(0).alias.equalsIgnoreCase("a"));
+        assertTrue(Wfields.get(0).type == DataType.INTEGER);
+        assertTrue(Wfields.get(1).alias.equalsIgnoreCase("b"));
+        assertTrue(Wfields.get(1).type == DataType.CHARARRAY);
+        assertTrue(Wfields.get(2).alias.equalsIgnoreCase("bkt"));
+        assertTrue(Wfields.get(2).type == DataType.CHARARRAY);
+
+        Iterator<Tuple> WIter = server.openIterator("W");
+        Collection<Pair<Integer, String>> valuesRead = new ArrayList<Pair<Integer, String>>();
+        while (WIter.hasNext()) {
+            Tuple t = WIter.next();
+            assertTrue(t.size() == 3);
+            assertTrue(t.get(0).getClass() == Integer.class);
+            assertTrue(t.get(1).getClass() == String.class);
+            assertTrue(t.get(2).getClass() == String.class);
+            valuesRead.add(new Pair<Integer, String>((Integer) t.get(0), (String) t.get(1)));
+            if ((Integer) t.get(0) < 2) {
+                assertEquals("0", t.get(2));
+            } else {
+                assertEquals("1", t.get(2));
+            }
+        }
+        assertEquals(valuesReadFromHiveDriver.size(), valuesRead.size());
+
+        server.registerQuery("P1 = load '" + PARTITIONED_TABLE + "' using org.apache.hcatalog.pig.HCatLoader();");
+        server.registerQuery("P1filter = filter P1 by bkt == '0';");
+        Iterator<Tuple> P1Iter = server.openIterator("P1filter");
+        int count1 = 0;
+        while (P1Iter.hasNext()) {
+            Tuple t = P1Iter.next();
+
+            assertEquals("0", t.get(2));
+            assertEquals(1, t.get(0));
+            count1++;
+        }
+        assertEquals(3, count1);
+
+        server.registerQuery("P2 = load '" + PARTITIONED_TABLE + "' using org.apache.hcatalog.pig.HCatLoader();");
+        server.registerQuery("P2filter = filter P2 by bkt == '1';");
+        Iterator<Tuple> P2Iter = server.openIterator("P2filter");
+        int count2 = 0;
+        while (P2Iter.hasNext()) {
+            Tuple t = P2Iter.next();
+
+            assertEquals("1", t.get(2));
+            assertTrue(((Integer) t.get(0)) > 1);
+            count2++;
+        }
+        assertEquals(6, count2);
+    }
+
+    public void testProjectionsBasic() throws IOException {
+
+        PigServer server = new PigServer(ExecType.LOCAL);
+
+        // projections are handled by using generate, not "as" on the Load
+
+        server.registerQuery("Y1 = load '" + BASIC_TABLE + "' using org.apache.hcatalog.pig.HCatLoader();");
+        server.registerQuery("Y2 = foreach Y1 generate a;");
+        server.registerQuery("Y3 = foreach Y1 generate b,a;");
+        Schema dumpedY2Schema = server.dumpSchema("Y2");
+        Schema dumpedY3Schema = server.dumpSchema("Y3");
+        List<FieldSchema> Y2fields = dumpedY2Schema.getFields();
+        List<FieldSchema> Y3fields = dumpedY3Schema.getFields();
+        assertEquals(1, Y2fields.size());
+        assertEquals("a", Y2fields.get(0).alias.toLowerCase());
+        assertEquals(DataType.INTEGER, Y2fields.get(0).type);
+        assertEquals(2, Y3fields.size());
+        assertEquals("b", Y3fields.get(0).alias.toLowerCase());
+        assertEquals(DataType.CHARARRAY, Y3fields.get(0).type);
+        assertEquals("a", Y3fields.get(1).alias.toLowerCase());
+        assertEquals(DataType.INTEGER, Y3fields.get(1).type);
+
+        int numTuplesRead = 0;
+        Iterator<Tuple> Y2Iter = server.openIterator("Y2");
+        while (Y2Iter.hasNext()) {
+            Tuple t = Y2Iter.next();
+            assertEquals(t.size(), 1);
+            assertTrue(t.get(0).getClass() == Integer.class);
+            assertEquals(t.get(0), basicInputData.get(numTuplesRead).first);
+            numTuplesRead++;
+        }
+        numTuplesRead = 0;
+        Iterator<Tuple> Y3Iter = server.openIterator("Y3");
+        while (Y3Iter.hasNext()) {
+            Tuple t = Y3Iter.next();
+            assertEquals(t.size(), 2);
+            assertTrue(t.get(0).getClass() == String.class);
+            assertEquals(t.get(0), basicInputData.get(numTuplesRead).second);
+            assertTrue(t.get(1).getClass() == Integer.class);
+            assertEquals(t.get(1), basicInputData.get(numTuplesRead).first);
+            numTuplesRead++;
+        }
+        assertEquals(basicInputData.size(), numTuplesRead);
+    }
+
+    public void testGetInputBytes() throws Exception {
+        File file = new File(TEST_WAREHOUSE_DIR + "/" + SPECIFIC_SIZE_TABLE + "/part-m-00000");
+        file.deleteOnExit();
+        RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw");
+        randomAccessFile.setLength(2L * 1024 * 1024 * 1024);
+
+        Job job = new Job();
+        HCatLoader hCatLoader = new HCatLoader();
+        hCatLoader.setUDFContextSignature(this.getName());
+        hCatLoader.setLocation(SPECIFIC_SIZE_TABLE, job);
+        ResourceStatistics statistics = hCatLoader.getStatistics(file.getAbsolutePath(), job);
+        assertEquals(2048, (long) statistics.getmBytes());
+    }
+
+    public void testConvertBooleanToInt() throws Exception {
+        String tbl = "test_convert_boolean_to_int";
+        String inputFileName = TEST_DATA_DIR + "/testConvertBooleanToInt/data.txt";
+        File inputDataDir = new File(inputFileName).getParentFile();
+        inputDataDir.mkdir();
+
+        String[] lines = new String[]{"llama\t1", "alpaca\t0"};
+        HcatTestUtils.createTestDataFile(inputFileName, lines);
+
+        assertEquals(0, driver.run("drop table if exists " + tbl).getResponseCode());
+        assertEquals(0, driver.run("create external table " + tbl +
+            " (a string, b boolean) row format delimited fields terminated by '\t'" +
+            " stored as textfile location 'file://" +
+            inputDataDir.getAbsolutePath() + "'").getResponseCode());
+
+        Properties properties = new Properties();
+        properties.setProperty(HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER, "true");
+        PigServer server = new PigServer(ExecType.LOCAL, properties);
+        server.registerQuery(
+            "data = load 'test_convert_boolean_to_int' using org.apache.hcatalog.pig.HCatLoader();");
+        Schema schema = server.dumpSchema("data");
+        assertEquals(2, schema.getFields().size());
+
+        assertEquals("a", schema.getField(0).alias);
+        assertEquals(DataType.CHARARRAY, schema.getField(0).type);
+        assertEquals("b", schema.getField(1).alias);
+        assertEquals(DataType.INTEGER, schema.getField(1).type);
+
+        Iterator<Tuple> iterator = server.openIterator("data");
+        Tuple t = iterator.next();
+        assertEquals("llama", t.get(0));
+        // TODO: Figure out how to load a text file into Hive with boolean columns. This next assert
+        // passes because data was loaded as integers, not because it was converted.
+        assertEquals(1, t.get(1));
+        t = iterator.next();
+        assertEquals("alpaca", t.get(0));
+        assertEquals(0, t.get(1));
+        assertFalse(iterator.hasNext());
+    }
 }
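
The TestHCatLoader class above emulates per-class setup and teardown under JUnit 3's TestCase by guarding setUp() with a flag and counting down the remaining tests in tearDown(). A stripped-down sketch of that idiom, with hypothetical class and fixture names, looks like this:

    import junit.framework.TestCase;

    public class GuardedLifecycleSketch extends TestCase {

        // Must match the number of test methods in the class, as guardTestCount does above.
        private static int guardTestCount = 2;
        private static boolean setupHasRun = false;

        @Override
        protected void setUp() throws Exception {
            if (setupHasRun) {
                return; // shared fixtures already built for this class
            }
            setupHasRun = true;
            // ... build expensive shared fixtures here (warehouse dir, driver, test tables) ...
        }

        @Override
        protected void tearDown() throws Exception {
            if (--guardTestCount > 0) {
                return; // later tests still need the shared fixtures
            }
            // ... drop the shared fixtures here ...
        }

        public void testFirst() { /* uses the shared fixtures */ }

        public void testSecond() { /* uses the shared fixtures */ }
    }
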
diff --git a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoaderComplexSchema.java b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoaderComplexSchema.java
index 4b20785..bb82ee8 100644
--- a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoaderComplexSchema.java
+++ b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoaderComplexSchema.java
@@ -50,239 +50,252 @@
 
 public class TestHCatLoaderComplexSchema {
 
-  //private static MiniCluster cluster = MiniCluster.buildCluster();
-  private static Driver driver;
-  //private static Properties props;
-  private static final Logger LOG = LoggerFactory.getLogger(TestHCatLoaderComplexSchema.class);
-  private void dropTable(String tablename) throws IOException, CommandNeedRetryException{
-    driver.run("drop table "+tablename);
-  }
+    //private static MiniCluster cluster = MiniCluster.buildCluster();
+    private static Driver driver;
+    //private static Properties props;
+    private static final Logger LOG = LoggerFactory.getLogger(TestHCatLoaderComplexSchema.class);
 
-  private void createTable(String tablename, String schema, String partitionedBy) throws IOException, CommandNeedRetryException{
-    String createTable;
-    createTable = "create table "+tablename+"("+schema+") ";
-    if ((partitionedBy != null)&&(!partitionedBy.trim().isEmpty())){
-      createTable = createTable + "partitioned by ("+partitionedBy+") ";
+    private void dropTable(String tablename) throws IOException, CommandNeedRetryException {
+        driver.run("drop table " + tablename);
     }
-    createTable = createTable + "stored as RCFILE tblproperties('hcat.isd'='org.apache.hcatalog.rcfile.RCFileInputDriver'," +
-        "'hcat.osd'='org.apache.hcatalog.rcfile.RCFileOutputDriver') ";
-    LOG.info("Creating table:\n {}", createTable);
-    CommandProcessorResponse result = driver.run(createTable);
-    int retCode = result.getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table. ["+createTable+"], return code from hive driver : ["+retCode+" "+result.getErrorMessage()+"]");
+
+    private void createTable(String tablename, String schema, String partitionedBy) throws IOException, CommandNeedRetryException {
+        String createTable;
+        createTable = "create table " + tablename + "(" + schema + ") ";
+        if ((partitionedBy != null) && (!partitionedBy.trim().isEmpty())) {
+            createTable = createTable + "partitioned by (" + partitionedBy + ") ";
+        }
+        createTable = createTable + "stored as RCFILE tblproperties('hcat.isd'='org.apache.hcatalog.rcfile.RCFileInputDriver'," +
+            "'hcat.osd'='org.apache.hcatalog.rcfile.RCFileOutputDriver') ";
+        LOG.info("Creating table:\n {}", createTable);
+        CommandProcessorResponse result = driver.run(createTable);
+        int retCode = result.getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table. [" + createTable + "], return code from hive driver : [" + retCode + " " + result.getErrorMessage() + "]");
+        }
     }
-  }
 
-  private void createTable(String tablename, String schema) throws IOException, CommandNeedRetryException{
-    createTable(tablename,schema,null);
-  }
+    private void createTable(String tablename, String schema) throws IOException, CommandNeedRetryException {
+        createTable(tablename, schema, null);
+    }
 
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
+    @BeforeClass
+    public static void setUpBeforeClass() throws Exception {
 
-    HiveConf hiveConf = new HiveConf(TestHCatLoaderComplexSchema.class  );
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    driver = new Driver(hiveConf);
-    SessionState.start(new CliSessionState(hiveConf));
-    //props = new Properties();
-    //props.setProperty("fs.default.name", cluster.getProperties().getProperty("fs.default.name"));
+        HiveConf hiveConf = new HiveConf(TestHCatLoaderComplexSchema.class);
+        hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+        driver = new Driver(hiveConf);
+        SessionState.start(new CliSessionState(hiveConf));
+        //props = new Properties();
+        //props.setProperty("fs.default.name", cluster.getProperties().getProperty("fs.default.name"));
 
-  }
+    }
 
-  private static final TupleFactory tf = TupleFactory.getInstance();
-  private static final BagFactory bf = BagFactory.getInstance();
-  private Tuple t(Object... objects) {
-    return tf.newTuple(Arrays.asList(objects));
-  }
-  private DataBag b(Tuple... objects) {
-    return bf.newDefaultBag(Arrays.asList(objects));
-  }
+    private static final TupleFactory tf = TupleFactory.getInstance();
+    private static final BagFactory bf = BagFactory.getInstance();
 
-  /**
-   * artificially complex nested schema to test nested schema conversion
-   * @throws Exception
-   */
-  @Test
-  public void testSyntheticComplexSchema() throws Exception {
-    String pigSchema =
-        "(" +
-          "a: " +
+    private Tuple t(Object... objects) {
+        return tf.newTuple(Arrays.asList(objects));
+    }
+
+    private DataBag b(Tuple... objects) {
+        return bf.newDefaultBag(Arrays.asList(objects));
+    }
+
+    /**
+     * artificially complex nested schema to test nested schema conversion
+     * @throws Exception
+     */
+    @Test
+    public void testSyntheticComplexSchema() throws Exception {
+        String pigSchema =
             "(" +
-              "aa: chararray, " +
-              "ab: long, " +
-              "ac: map[], " +
-              "ad: { t: (ada: long) }, " +
-              "ae: { t: (aea:long, aeb: ( aeba: chararray, aebb: long)) }," +
-              "af: (afa: chararray, afb: long) " +
-            ")," +
-           "b: chararray, " +
-           "c: long, " +
-           "d:  { t: (da:long, db: ( dba: chararray, dbb: long), dc: { t: (dca: long) } ) } " +
-         ")";
+                "a: " +
+                "(" +
+                "aa: chararray, " +
+                "ab: long, " +
+                "ac: map[], " +
+                "ad: { t: (ada: long) }, " +
+                "ae: { t: (aea:long, aeb: ( aeba: chararray, aebb: long)) }," +
+                "af: (afa: chararray, afb: long) " +
+                ")," +
+                "b: chararray, " +
+                "c: long, " +
+                "d:  { t: (da:long, db: ( dba: chararray, dbb: long), dc: { t: (dca: long) } ) } " +
+                ")";
 
-    // with extra structs
-    String tableSchema =
-        "a struct<" +
-            "aa: string, " +
-            "ab: bigint, " +
-            "ac: map<string, string>, " +
-            "ad: array<struct<ada:bigint>>, " +
-            "ae: array<struct<aea:bigint, aeb: struct<aeba: string, aebb: bigint>>>," +
-            "af: struct<afa: string, afb: bigint> " +
-            ">, " +
-        "b string, " +
-        "c bigint, " +
-        "d array<struct<da: bigint, db: struct<dba:string, dbb:bigint>, dc: array<struct<dca: bigint>>>>";
+        // with extra structs
+        String tableSchema =
+            "a struct<" +
+                "aa: string, " +
+                "ab: bigint, " +
+                "ac: map<string, string>, " +
+                "ad: array<struct<ada:bigint>>, " +
+                "ae: array<struct<aea:bigint, aeb: struct<aeba: string, aebb: bigint>>>," +
+                "af: struct<afa: string, afb: bigint> " +
+                ">, " +
+                "b string, " +
+                "c bigint, " +
+                "d array<struct<da: bigint, db: struct<dba:string, dbb:bigint>, dc: array<struct<dca: bigint>>>>";
 
-    // without extra structs
-    String tableSchema2 =
-        "a struct<" +
-            "aa: string, " +
-            "ab: bigint, " +
-            "ac: map<string, string>, " +
-            "ad: array<bigint>, " +
-            "ae: array<struct<aea:bigint, aeb: struct<aeba: string, aebb: bigint>>>," +
-            "af: struct<afa: string, afb: bigint> " +
-            ">, " +
-        "b string, " +
-        "c bigint, " +
-        "d array<struct<da: bigint, db: struct<dba:string, dbb:bigint>, dc: array<bigint>>>";
+        // without extra structs
+        String tableSchema2 =
+            "a struct<" +
+                "aa: string, " +
+                "ab: bigint, " +
+                "ac: map<string, string>, " +
+                "ad: array<bigint>, " +
+                "ae: array<struct<aea:bigint, aeb: struct<aeba: string, aebb: bigint>>>," +
+                "af: struct<afa: string, afb: bigint> " +
+                ">, " +
+                "b string, " +
+                "c bigint, " +
+                "d array<struct<da: bigint, db: struct<dba:string, dbb:bigint>, dc: array<bigint>>>";
 
-    List<Tuple> data = new ArrayList<Tuple>();
-    for (int i = 0; i < 10; i++) {
-      Tuple t = t(
-          t(
-              "aa test",
-              2l,
-              new HashMap<String, String>() {{put("ac test1", "test 1");put("ac test2", "test 2");}},
-              b(t(3l), t(4l)),
-              b(t(5l, t("aeba test", 6l))),
-              t("afa test", 7l)
-          ),
-          "b test",
-          (long)i,
-          b(t(8l, t("dba test", 9l), b(t(10l)))));
+        List<Tuple> data = new ArrayList<Tuple>();
+        for (int i = 0; i < 10; i++) {
+            Tuple t = t(
+                t(
+                    "aa test",
+                    2l,
+                    new HashMap<String, String>() {
+                        {
+                            put("ac test1", "test 1");
+                            put("ac test2", "test 2");
+                        }
+                    },
+                    b(t(3l), t(4l)),
+                    b(t(5l, t("aeba test", 6l))),
+                    t("afa test", 7l)
+                ),
+                "b test",
+                (long) i,
+                b(t(8l, t("dba test", 9l), b(t(10l)))));
 
-      data.add(t);
+            data.add(t);
+        }
+        verifyWriteRead("testSyntheticComplexSchema", pigSchema, tableSchema, data, true);
+        verifyWriteRead("testSyntheticComplexSchema", pigSchema, tableSchema, data, false);
+        verifyWriteRead("testSyntheticComplexSchema2", pigSchema, tableSchema2, data, true);
+        verifyWriteRead("testSyntheticComplexSchema2", pigSchema, tableSchema2, data, false);
+
     }
-    verifyWriteRead("testSyntheticComplexSchema", pigSchema, tableSchema, data, true);
-    verifyWriteRead("testSyntheticComplexSchema", pigSchema, tableSchema, data, false);
-    verifyWriteRead("testSyntheticComplexSchema2", pigSchema, tableSchema2, data, true);
-    verifyWriteRead("testSyntheticComplexSchema2", pigSchema, tableSchema2, data, false);
 
-  }
+    private void verifyWriteRead(String tablename, String pigSchema, String tableSchema, List<Tuple> data, boolean provideSchemaToStorer)
+        throws IOException, CommandNeedRetryException, ExecException, FrontendException {
+        MockLoader.setData(tablename + "Input", data);
+        try {
+            createTable(tablename, tableSchema);
+            PigServer server = new PigServer(ExecType.LOCAL);
+            server.setBatchOn();
+            server.registerQuery("A = load '" + tablename + "Input' using org.apache.hcatalog.pig.MockLoader() AS " + pigSchema + ";");
+            Schema dumpedASchema = server.dumpSchema("A");
+            server.registerQuery("STORE A into '" + tablename + "' using org.apache.hcatalog.pig.HCatStorer("
+                + (provideSchemaToStorer ? "'', '" + pigSchema + "'" : "")
+                + ");");
 
-  private void verifyWriteRead(String tablename, String pigSchema, String tableSchema, List<Tuple> data, boolean provideSchemaToStorer)
-      throws IOException, CommandNeedRetryException, ExecException, FrontendException {
-    MockLoader.setData(tablename+"Input", data);
-    try {
-      createTable(tablename, tableSchema);
-      PigServer server = new PigServer(ExecType.LOCAL);
-      server.setBatchOn();
-      server.registerQuery("A = load '"+tablename+"Input' using org.apache.hcatalog.pig.MockLoader() AS "+pigSchema+";");
-      Schema dumpedASchema = server.dumpSchema("A");
-      server.registerQuery("STORE A into '"+tablename+"' using org.apache.hcatalog.pig.HCatStorer("
-          + (provideSchemaToStorer ? "'', '"+pigSchema+"'" : "")
-          + ");");
+            ExecJob execJob = server.executeBatch().get(0);
+            if (!execJob.getStatistics().isSuccessful()) {
+                throw new RuntimeException("Import failed", execJob.getException());
+            }
+            // test that schema was loaded correctly
+            server.registerQuery("X = load '" + tablename + "' using org.apache.hcatalog.pig.HCatLoader();");
+            server.dumpSchema("X");
+            Iterator<Tuple> it = server.openIterator("X");
+            int i = 0;
+            while (it.hasNext()) {
+                Tuple input = data.get(i++);
+                Tuple output = it.next();
+                Assert.assertEquals(input.toString(), output.toString());
+                LOG.info("tuple : {} ", output);
+            }
+            Schema dumpedXSchema = server.dumpSchema("X");
 
-      ExecJob execJob = server.executeBatch().get(0);
-      if (!execJob.getStatistics().isSuccessful()) {
-        throw new RuntimeException("Import failed", execJob.getException());
-      }
-      // test that schema was loaded correctly
-      server.registerQuery("X = load '"+tablename+"' using org.apache.hcatalog.pig.HCatLoader();");
-      server.dumpSchema("X");
-      Iterator<Tuple> it = server.openIterator("X");
-      int i = 0;
-      while (it.hasNext()) {
-        Tuple input = data.get(i++);
-        Tuple output = it.next();
-        Assert.assertEquals(input.toString(), output.toString());
-        LOG.info("tuple : {} ",output);
-      }
-      Schema dumpedXSchema = server.dumpSchema("X");
+            Assert.assertEquals(
+                "expected " + dumpedASchema + " but was " + dumpedXSchema + " (ignoring field names)",
+                "",
+                compareIgnoreFiledNames(dumpedASchema, dumpedXSchema));
 
-      Assert.assertEquals(
-          "expected " + dumpedASchema + " but was " + dumpedXSchema + " (ignoring field names)",
-          "",
-          compareIgnoreFiledNames(dumpedASchema, dumpedXSchema));
-
-    } finally {
-      dropTable(tablename);
+        } finally {
+            dropTable(tablename);
+        }
     }
-  }
 
-  private String compareIgnoreFiledNames(Schema expected, Schema got) throws FrontendException {
-    if (expected == null || got == null) {
-      if (expected == got) {
-        return "";
-      } else {
-        return "\nexpected "+expected+" got "+got;
-      }
+    private String compareIgnoreFiledNames(Schema expected, Schema got) throws FrontendException {
+        if (expected == null || got == null) {
+            if (expected == got) {
+                return "";
+            } else {
+                return "\nexpected " + expected + " got " + got;
+            }
+        }
+        if (expected.size() != got.size()) {
+            return "\nsize expected " + expected.size() + " (" + expected + ") got " + got.size() + " (" + got + ")";
+        }
+        String message = "";
+        for (int i = 0; i < expected.size(); i++) {
+            FieldSchema expectedField = expected.getField(i);
+            FieldSchema gotField = got.getField(i);
+            if (expectedField.type != gotField.type) {
+                message += "\ntype expected " + expectedField.type + " (" + expectedField + ") got " + gotField.type + " (" + gotField + ")";
+            } else {
+                message += compareIgnoreFiledNames(expectedField.schema, gotField.schema);
+            }
+        }
+        return message;
     }
-    if (expected.size() != got.size()) {
-      return "\nsize expected "+expected.size()+" ("+expected+") got "+got.size()+" ("+got+")";
+
+    /**
+     * tests that unnecessary tuples are dropped while converting the schema
+     * (Pig requires Tuples in Bags)
+     * @throws Exception
+     */
+    @Test
+    public void testTupleInBagInTupleInBag() throws Exception {
+        String pigSchema = "(a: { b : ( c: { d: (i : long) } ) })";
+
+        String tableSchema = "a array< array< bigint > >";
+
+        List<Tuple> data = new ArrayList<Tuple>();
+        data.add(t(b(t(b(t(100l), t(101l))), t(b(t(110l))))));
+        data.add(t(b(t(b(t(200l))), t(b(t(210l))), t(b(t(220l))))));
+        data.add(t(b(t(b(t(300l), t(301l))))));
+        data.add(t(b(t(b(t(400l))), t(b(t(410l), t(411l), t(412l))))));
+
+
+        verifyWriteRead("TupleInBagInTupleInBag1", pigSchema, tableSchema, data, true);
+        verifyWriteRead("TupleInBagInTupleInBag2", pigSchema, tableSchema, data, false);
+
+        // test that we don't drop the unnecessary tuple if the table has the corresponding Struct
+        String tableSchema2 = "a array< struct< c: array< struct< i: bigint > > > >";
+
+        verifyWriteRead("TupleInBagInTupleInBag3", pigSchema, tableSchema2, data, true);
+        verifyWriteRead("TupleInBagInTupleInBag4", pigSchema, tableSchema2, data, false);
+
     }
-    String message = "";
-    for (int i = 0; i < expected.size(); i++) {
-      FieldSchema expectedField = expected.getField(i);
-      FieldSchema gotField = got.getField(i);
-      if (expectedField.type != gotField.type) {
-        message += "\ntype expected "+expectedField.type+" ("+expectedField+") got "+gotField.type+" ("+gotField+")";
-      } else {
-        message += compareIgnoreFiledNames(expectedField.schema, gotField.schema);
-      }
+
+    @Test
+    public void testMapWithComplexData() throws Exception {
+        String pigSchema = "(a: long, b: map[])";
+        String tableSchema = "a bigint, b map<string, struct<aa:bigint, ab:string>>";
+
+        List<Tuple> data = new ArrayList<Tuple>();
+        for (int i = 0; i < 10; i++) {
+            Tuple t = t(
+                (long) i,
+                new HashMap<String, Object>() {
+                    {
+                        put("b test 1", t(1l, "test 1"));
+                        put("b test 2", t(2l, "test 2"));
+                    }
+                });
+
+            data.add(t);
+        }
+        verifyWriteRead("testMapWithComplexData", pigSchema, tableSchema, data, true);
+        verifyWriteRead("testMapWithComplexData2", pigSchema, tableSchema, data, false);
+
     }
-    return message;
-  }
-
-  /**
-   * tests that unnecessary tuples are drop while converting schema
-   * (Pig requires Tuples in Bags)
-   * @throws Exception
-   */
-  @Test
-  public void testTupleInBagInTupleInBag() throws Exception {
-    String pigSchema = "(a: { b : ( c: { d: (i : long) } ) })";
-
-    String tableSchema = "a array< array< bigint > >";
-
-    List<Tuple> data = new ArrayList<Tuple>();
-    data.add(t(b(t(b(t(100l),t(101l))), t(b(t(110l))))));
-    data.add(t(b(t(b(t(200l))), t(b(t(210l))), t(b(t(220l))))));
-    data.add(t(b(t(b(t(300l),t(301l))))));
-    data.add(t(b(t(b(t(400l))), t(b(t(410l),t(411l),t(412l))))));
-
-
-    verifyWriteRead("TupleInBagInTupleInBag1", pigSchema, tableSchema, data, true);
-    verifyWriteRead("TupleInBagInTupleInBag2", pigSchema, tableSchema, data, false);
-
-    // test that we don't drop the unnecessary tuple if the table has the corresponding Struct
-    String tableSchema2 = "a array< struct< c: array< struct< i: bigint > > > >";
-
-    verifyWriteRead("TupleInBagInTupleInBag3", pigSchema, tableSchema2, data, true);
-    verifyWriteRead("TupleInBagInTupleInBag4", pigSchema, tableSchema2, data, false);
-
-  }
-
-  @Test
-  public void testMapWithComplexData() throws Exception {
-    String pigSchema = "(a: long, b: map[])";
-    String tableSchema = "a bigint, b map<string, struct<aa:bigint, ab:string>>";
-
-    List<Tuple> data = new ArrayList<Tuple>();
-    for (int i = 0; i < 10; i++) {
-      Tuple t = t(
-          (long)i,
-          new HashMap<String, Object>() {{put("b test 1", t(1l,"test 1"));put("b test 2", t(2l, "test 2"));}});
-
-      data.add(t);
-    }
-  verifyWriteRead("testMapWithComplexData", pigSchema, tableSchema, data, true);
-  verifyWriteRead("testMapWithComplexData2", pigSchema, tableSchema, data, false);
-
-  }
- }
+}
diff --git a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoaderStorer.java b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoaderStorer.java
index ed04581..e6cb139 100644
--- a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoaderStorer.java
+++ b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoaderStorer.java
@@ -40,101 +40,101 @@
  */
 public class TestHCatLoaderStorer extends HCatBaseTest {
 
-  /**
-   * Ensure Pig can read/write tinyint/smallint columns.
-   */
-  @Test
-  public void testSmallTinyInt() throws Exception {
+    /**
+     * Ensure Pig can read/write tinyint/smallint columns.
+     */
+    @Test
+    public void testSmallTinyInt() throws Exception {
 
-    String readTblName = "test_small_tiny_int";
-    File dataDir = new File(TEST_DATA_DIR + "/testSmallTinyIntData");
-    File dataFile = new File(dataDir, "testSmallTinyInt.tsv");
+        String readTblName = "test_small_tiny_int";
+        File dataDir = new File(TEST_DATA_DIR + "/testSmallTinyIntData");
+        File dataFile = new File(dataDir, "testSmallTinyInt.tsv");
 
-    String writeTblName = "test_small_tiny_int_write";
-    File writeDataFile = new File(TEST_DATA_DIR, writeTblName + ".tsv");
+        String writeTblName = "test_small_tiny_int_write";
+        File writeDataFile = new File(TEST_DATA_DIR, writeTblName + ".tsv");
 
-    FileUtil.fullyDelete(dataDir); // Might not exist
-    Assert.assertTrue(dataDir.mkdir());
+        FileUtil.fullyDelete(dataDir); // Might not exist
+        Assert.assertTrue(dataDir.mkdir());
 
-    HcatTestUtils.createTestDataFile(dataFile.getAbsolutePath(), new String[]{
-        String.format("%d\t%d", Short.MIN_VALUE, Byte.MIN_VALUE),
-        String.format("%d\t%d", Short.MAX_VALUE, Byte.MAX_VALUE)
-    });
+        HcatTestUtils.createTestDataFile(dataFile.getAbsolutePath(), new String[]{
+            String.format("%d\t%d", Short.MIN_VALUE, Byte.MIN_VALUE),
+            String.format("%d\t%d", Short.MAX_VALUE, Byte.MAX_VALUE)
+        });
 
-    // Create a table with smallint/tinyint columns, load data, and query from Hive.
-    Assert.assertEquals(0, driver.run("drop table if exists " + readTblName).getResponseCode());
-    Assert.assertEquals(0, driver.run("create external table " + readTblName +
-        " (my_small_int smallint, my_tiny_int tinyint)" +
-        " row format delimited fields terminated by '\t' stored as textfile").getResponseCode());
-    Assert.assertEquals(0, driver.run("load data local inpath '" +
-        dataDir.getAbsolutePath() + "' into table " + readTblName).getResponseCode());
+        // Create a table with smallint/tinyint columns, load data, and query from Hive.
+        Assert.assertEquals(0, driver.run("drop table if exists " + readTblName).getResponseCode());
+        Assert.assertEquals(0, driver.run("create external table " + readTblName +
+            " (my_small_int smallint, my_tiny_int tinyint)" +
+            " row format delimited fields terminated by '\t' stored as textfile").getResponseCode());
+        Assert.assertEquals(0, driver.run("load data local inpath '" +
+            dataDir.getAbsolutePath() + "' into table " + readTblName).getResponseCode());
 
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.registerQuery(
-        "data = load '" + readTblName + "' using org.apache.hcatalog.pig.HCatLoader();");
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.registerQuery(
+            "data = load '" + readTblName + "' using org.apache.hcatalog.pig.HCatLoader();");
 
-    // Ensure Pig schema is correct.
-    Schema schema = server.dumpSchema("data");
-    Assert.assertEquals(2, schema.getFields().size());
-    Assert.assertEquals("my_small_int", schema.getField(0).alias);
-    Assert.assertEquals(DataType.INTEGER, schema.getField(0).type);
-    Assert.assertEquals("my_tiny_int", schema.getField(1).alias);
-    Assert.assertEquals(DataType.INTEGER, schema.getField(1).type);
+        // Ensure Pig schema is correct.
+        Schema schema = server.dumpSchema("data");
+        Assert.assertEquals(2, schema.getFields().size());
+        Assert.assertEquals("my_small_int", schema.getField(0).alias);
+        Assert.assertEquals(DataType.INTEGER, schema.getField(0).type);
+        Assert.assertEquals("my_tiny_int", schema.getField(1).alias);
+        Assert.assertEquals(DataType.INTEGER, schema.getField(1).type);
 
-    // Ensure Pig can read data correctly.
-    Iterator<Tuple> it = server.openIterator("data");
-    Tuple t = it.next();
-    Assert.assertEquals(new Integer(Short.MIN_VALUE), t.get(0));
-    Assert.assertEquals(new Integer(Byte.MIN_VALUE), t.get(1));
-    t = it.next();
-    Assert.assertEquals(new Integer(Short.MAX_VALUE), t.get(0));
-    Assert.assertEquals(new Integer(Byte.MAX_VALUE), t.get(1));
-    Assert.assertFalse(it.hasNext());
+        // Ensure Pig can read data correctly.
+        Iterator<Tuple> it = server.openIterator("data");
+        Tuple t = it.next();
+        Assert.assertEquals(new Integer(Short.MIN_VALUE), t.get(0));
+        Assert.assertEquals(new Integer(Byte.MIN_VALUE), t.get(1));
+        t = it.next();
+        Assert.assertEquals(new Integer(Short.MAX_VALUE), t.get(0));
+        Assert.assertEquals(new Integer(Byte.MAX_VALUE), t.get(1));
+        Assert.assertFalse(it.hasNext());
 
-    // Ensure Pig can write correctly to smallint/tinyint columns. This means values within the
-    // bounds of the column type are written, and values outside throw an exception.
-    Assert.assertEquals(0, driver.run("drop table if exists " + writeTblName).getResponseCode());
-    Assert.assertEquals(0, driver.run("create table " + writeTblName +
-        " (my_small_int smallint, my_tiny_int tinyint) stored as rcfile").getResponseCode());
+        // Ensure Pig can write correctly to smallint/tinyint columns. This means values within the
+        // bounds of the column type are written, and values outside throw an exception.
+        Assert.assertEquals(0, driver.run("drop table if exists " + writeTblName).getResponseCode());
+        Assert.assertEquals(0, driver.run("create table " + writeTblName +
+            " (my_small_int smallint, my_tiny_int tinyint) stored as rcfile").getResponseCode());
 
-    // Values within the column type bounds.
-    HcatTestUtils.createTestDataFile(writeDataFile.getAbsolutePath(), new String[]{
-        String.format("%d\t%d", Short.MIN_VALUE, Byte.MIN_VALUE),
-        String.format("%d\t%d", Short.MAX_VALUE, Byte.MAX_VALUE)
-    });
-    smallTinyIntBoundsCheckHelper(writeDataFile.getAbsolutePath(), ExecJob.JOB_STATUS.COMPLETED);
+        // Values within the column type bounds.
+        HcatTestUtils.createTestDataFile(writeDataFile.getAbsolutePath(), new String[]{
+            String.format("%d\t%d", Short.MIN_VALUE, Byte.MIN_VALUE),
+            String.format("%d\t%d", Short.MAX_VALUE, Byte.MAX_VALUE)
+        });
+        smallTinyIntBoundsCheckHelper(writeDataFile.getAbsolutePath(), ExecJob.JOB_STATUS.COMPLETED);
 
-    // Values outside the column type bounds will fail at runtime.
-    HcatTestUtils.createTestDataFile(TEST_DATA_DIR + "/shortTooSmall.tsv", new String[]{
-        String.format("%d\t%d", Short.MIN_VALUE - 1, 0)});
-    smallTinyIntBoundsCheckHelper(TEST_DATA_DIR + "/shortTooSmall.tsv", ExecJob.JOB_STATUS.FAILED);
+        // Values outside the column type bounds will fail at runtime.
+        HcatTestUtils.createTestDataFile(TEST_DATA_DIR + "/shortTooSmall.tsv", new String[]{
+            String.format("%d\t%d", Short.MIN_VALUE - 1, 0)});
+        smallTinyIntBoundsCheckHelper(TEST_DATA_DIR + "/shortTooSmall.tsv", ExecJob.JOB_STATUS.FAILED);
 
-    HcatTestUtils.createTestDataFile(TEST_DATA_DIR + "/shortTooBig.tsv", new String[]{
-        String.format("%d\t%d", Short.MAX_VALUE + 1, 0)});
-    smallTinyIntBoundsCheckHelper(TEST_DATA_DIR + "/shortTooBig.tsv", ExecJob.JOB_STATUS.FAILED);
+        HcatTestUtils.createTestDataFile(TEST_DATA_DIR + "/shortTooBig.tsv", new String[]{
+            String.format("%d\t%d", Short.MAX_VALUE + 1, 0)});
+        smallTinyIntBoundsCheckHelper(TEST_DATA_DIR + "/shortTooBig.tsv", ExecJob.JOB_STATUS.FAILED);
 
-    HcatTestUtils.createTestDataFile(TEST_DATA_DIR + "/byteTooSmall.tsv", new String[]{
-        String.format("%d\t%d", 0, Byte.MIN_VALUE - 1)});
-    smallTinyIntBoundsCheckHelper(TEST_DATA_DIR + "/byteTooSmall.tsv", ExecJob.JOB_STATUS.FAILED);
+        HcatTestUtils.createTestDataFile(TEST_DATA_DIR + "/byteTooSmall.tsv", new String[]{
+            String.format("%d\t%d", 0, Byte.MIN_VALUE - 1)});
+        smallTinyIntBoundsCheckHelper(TEST_DATA_DIR + "/byteTooSmall.tsv", ExecJob.JOB_STATUS.FAILED);
 
-    HcatTestUtils.createTestDataFile(TEST_DATA_DIR + "/byteTooBig.tsv", new String[]{
-        String.format("%d\t%d", 0, Byte.MAX_VALUE + 1)});
-    smallTinyIntBoundsCheckHelper(TEST_DATA_DIR + "/byteTooBig.tsv", ExecJob.JOB_STATUS.FAILED);
-  }
+        HcatTestUtils.createTestDataFile(TEST_DATA_DIR + "/byteTooBig.tsv", new String[]{
+            String.format("%d\t%d", 0, Byte.MAX_VALUE + 1)});
+        smallTinyIntBoundsCheckHelper(TEST_DATA_DIR + "/byteTooBig.tsv", ExecJob.JOB_STATUS.FAILED);
+    }
 
-  private void smallTinyIntBoundsCheckHelper(String data, ExecJob.JOB_STATUS expectedStatus)
-      throws Exception {
-    Assert.assertEquals(0, driver.run("drop table if exists test_tbl").getResponseCode());
-    Assert.assertEquals(0, driver.run("create table test_tbl" +
-        " (my_small_int smallint, my_tiny_int tinyint) stored as rcfile").getResponseCode());
+    private void smallTinyIntBoundsCheckHelper(String data, ExecJob.JOB_STATUS expectedStatus)
+        throws Exception {
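+        // store the given TSV into a smallint/tinyint table via HCatStorer and assert the batch job status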
+        Assert.assertEquals(0, driver.run("drop table if exists test_tbl").getResponseCode());
+        Assert.assertEquals(0, driver.run("create table test_tbl" +
+            " (my_small_int smallint, my_tiny_int tinyint) stored as rcfile").getResponseCode());
 
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.setBatchOn();
-    server.registerQuery("data = load '" + data +
-        "' using PigStorage('\t') as (my_small_int:int, my_tiny_int:int);");
-    server.registerQuery(
-        "store data into 'test_tbl' using org.apache.hcatalog.pig.HCatStorer();");
-    List<ExecJob> jobs = server.executeBatch();
-    Assert.assertEquals(expectedStatus, jobs.get(0).getStatus());
-  }
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("data = load '" + data +
+            "' using PigStorage('\t') as (my_small_int:int, my_tiny_int:int);");
+        server.registerQuery(
+            "store data into 'test_tbl' using org.apache.hcatalog.pig.HCatStorer();");
+        List<ExecJob> jobs = server.executeBatch();
+        Assert.assertEquals(expectedStatus, jobs.get(0).getStatus());
+    }
 }
diff --git a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatStorer.java b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatStorer.java
index c53eacf..2f2c9cb 100644
--- a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatStorer.java
+++ b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatStorer.java
@@ -37,562 +37,560 @@
 
 public class TestHCatStorer extends HCatBaseTest {
 
-  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
-
-  @Test
-  public void testPartColsInData() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table junit_unparted");
-    String createTable = "create table junit_unparted(a int) partitioned by (b string) stored as RCFILE";
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-    int LOOP_SIZE = 11;
-    String[] input = new String[LOOP_SIZE];
-    for(int i = 0; i < LOOP_SIZE; i++) {
-        input[i] = i + "\t1";
-    }
-    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
-    server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('b=1');");
-    server.registerQuery("B = load 'default.junit_unparted' using "+HCatLoader.class.getName()+"();");
-    Iterator<Tuple> itr= server.openIterator("B");
-
-    int i = 0;
-
-    while(itr.hasNext()){
-      Tuple t = itr.next();
-      Assert.assertEquals(2, t.size());
-      Assert.assertEquals(t.get(0), i);
-      Assert.assertEquals(t.get(1), "1");
-      i++;
-    }
-
-    Assert.assertFalse(itr.hasNext());
-    Assert.assertEquals(11, i);
-  }
-
-  @Test
-  public void testMultiPartColsInData() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table employee");
-    String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , emp_gender STRING ) " +
-        " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS RCFILE";
-
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-
-    String[] inputData = {"111237\tKrishna\t01/01/1990\tM\tIN\tTN",
-                          "111238\tKalpana\t01/01/2000\tF\tIN\tKA",
-                          "111239\tSatya\t01/01/2001\tM\tIN\tKL",
-                          "111240\tKavya\t01/01/2002\tF\tIN\tAP"};
-
-    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
-    PigServer pig = new PigServer(ExecType.LOCAL);
-    pig.setBatchOn();
-    pig.registerQuery("A = LOAD '"+INPUT_FILE_NAME+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
-        "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
-    pig.registerQuery("TN = FILTER A BY emp_state == 'TN';");
-    pig.registerQuery("KA = FILTER A BY emp_state == 'KA';");
-    pig.registerQuery("KL = FILTER A BY emp_state == 'KL';");
-    pig.registerQuery("AP = FILTER A BY emp_state == 'AP';");
-    pig.registerQuery("STORE TN INTO 'employee' USING "+HCatStorer.class.getName()+"('emp_country=IN,emp_state=TN');");
-    pig.registerQuery("STORE KA INTO 'employee' USING "+HCatStorer.class.getName()+"('emp_country=IN,emp_state=KA');");
-    pig.registerQuery("STORE KL INTO 'employee' USING "+HCatStorer.class.getName()+"('emp_country=IN,emp_state=KL');");
-    pig.registerQuery("STORE AP INTO 'employee' USING "+HCatStorer.class.getName()+"('emp_country=IN,emp_state=AP');");
-    pig.executeBatch();
-    driver.run("select * from employee");
-    ArrayList<String> results = new ArrayList<String>();
-    driver.getResults(results);
-    Assert.assertEquals(4, results.size());
-    Collections.sort(results);
-    Assert.assertEquals(inputData[0], results.get(0));
-    Assert.assertEquals(inputData[1], results.get(1));
-    Assert.assertEquals(inputData[2], results.get(2));
-    Assert.assertEquals(inputData[3], results.get(3));
-    driver.run("drop table employee");
-  }
-
-  @Test
-  public void testStoreInPartiitonedTbl() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table junit_unparted");
-    String createTable = "create table junit_unparted(a int) partitioned by (b string) stored as RCFILE";
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-    int LOOP_SIZE = 11;
-    String[] input = new String[LOOP_SIZE];
-    for(int i = 0; i < LOOP_SIZE; i++) {
-        input[i] = i+"";
-    }
-    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int);");
-    server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('b=1');");
-    server.registerQuery("B = load 'default.junit_unparted' using "+HCatLoader.class.getName()+"();");
-    Iterator<Tuple> itr= server.openIterator("B");
-
-    int i = 0;
-
-    while(itr.hasNext()){
-      Tuple t = itr.next();
-      Assert.assertEquals(2, t.size());
-      Assert.assertEquals(t.get(0), i);
-      Assert.assertEquals(t.get(1), "1");
-      i++;
-    }
-
-    Assert.assertFalse(itr.hasNext());
-    Assert.assertEquals(11, i);
-  }
-
-  @Test
-  public void testNoAlias() throws IOException, CommandNeedRetryException{
-    driver.run("drop table junit_parted");
-    String createTable = "create table junit_parted(a int, b string) partitioned by (ds string) stored as RCFILE";
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-    PigServer server = new PigServer(ExecType.LOCAL);
-    boolean errCaught = false;
-    try{
-      server.setBatchOn();
-      server.registerQuery("A = load '"+ INPUT_FILE_NAME +"' as (a:int, b:chararray);");
-      server.registerQuery("B = foreach A generate a+10, b;");
-      server.registerQuery("store B into 'junit_parted' using "+HCatStorer.class.getName()+"('ds=20100101');");
-      server.executeBatch();
-    }
-    catch(PigException fe){
-      PigException pe = LogUtils.getPigException(fe);
-      Assert.assertTrue(pe instanceof FrontendException);
-      Assert.assertEquals(PigHCatUtil.PIG_EXCEPTION_CODE, pe.getErrorCode());
-      Assert.assertTrue(pe.getMessage().contains("Column name for a field is not specified. Please provide the full schema as an argument to HCatStorer."));
-      errCaught = true;
-    }
-    Assert.assertTrue(errCaught);
-    errCaught = false;
-    try{
-      server.setBatchOn();
-      server.registerQuery("A = load '"+ INPUT_FILE_NAME +"' as (a:int, B:chararray);");
-      server.registerQuery("B = foreach A generate a, B;");
-      server.registerQuery("store B into 'junit_parted' using "+HCatStorer.class.getName()+"('ds=20100101');");
-      server.executeBatch();
-    }
-    catch(PigException fe){
-      PigException pe = LogUtils.getPigException(fe);
-      Assert.assertTrue(pe instanceof FrontendException);
-      Assert.assertEquals(PigHCatUtil.PIG_EXCEPTION_CODE, pe.getErrorCode());
-      Assert.assertTrue(pe.getMessage().contains("Column names should all be in lowercase. Invalid name found: B"));
-      errCaught = true;
-    }
-    driver.run("drop table junit_parted");
-    Assert.assertTrue(errCaught);
-  }
-
-  @Test
-  public void testStoreMultiTables() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table junit_unparted");
-    String createTable = "create table junit_unparted(a int, b string) stored as RCFILE";
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-    driver.run("drop table junit_unparted2");
-    createTable = "create table junit_unparted2(a int, b string) stored as RCFILE";
-    retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-
-    int LOOP_SIZE = 3;
-    String[] input = new String[LOOP_SIZE*LOOP_SIZE];
-    int k = 0;
-    for(int i = 1; i <= LOOP_SIZE; i++) {
-      String si = i + "";
-      for(int j=1;j<=LOOP_SIZE;j++) {
-        input[k++] = si + "\t"+j;
-      }
-    }
-    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.setBatchOn();
-    server.registerQuery("A = load '"+ INPUT_FILE_NAME +"' as (a:int, b:chararray);");
-    server.registerQuery("B = filter A by a < 2;");
-    server.registerQuery("store B into 'junit_unparted' using "+HCatStorer.class.getName()+"();");
-    server.registerQuery("C = filter A by a >= 2;");
-    server.registerQuery("store C into 'junit_unparted2' using "+HCatStorer.class.getName()+"();");
-    server.executeBatch();
-
-    driver.run("select * from junit_unparted");
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-    driver.run("select * from junit_unparted2");
-    ArrayList<String> res2 = new ArrayList<String>();
-    driver.getResults(res2);
-
-    res.addAll(res2);
-    driver.run("drop table junit_unparted");
-    driver.run("drop table junit_unparted2");
-
-    Iterator<String> itr = res.iterator();
-    for(int i = 0; i < LOOP_SIZE*LOOP_SIZE; i++) {
-      Assert.assertEquals( input[i] ,itr.next());
-    }
-
-    Assert.assertFalse(itr.hasNext());
-
-  }
-
-  @Test
-  public void testStoreWithNoSchema() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table junit_unparted");
-    String createTable = "create table junit_unparted(a int, b string) stored as RCFILE";
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-
-    int LOOP_SIZE = 3;
-    String[] input = new String[LOOP_SIZE*LOOP_SIZE];
-    int k = 0;
-    for(int i = 1; i <= LOOP_SIZE; i++) {
-      String si = i + "";
-      for(int j=1;j<=LOOP_SIZE;j++) {
-        input[k++] = si + "\t"+j;
-      }
-    }
-    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.setBatchOn();
-    server.registerQuery("A = load '"+ INPUT_FILE_NAME +"' as (a:int, b:chararray);");
-    server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('');");
-    server.executeBatch();
-
-    driver.run("select * from junit_unparted");
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-    driver.run("drop table junit_unparted");
-    Iterator<String> itr = res.iterator();
-    for(int i = 0; i < LOOP_SIZE*LOOP_SIZE; i++) {
-      Assert.assertEquals( input[i] ,itr.next());
-    }
-
-    Assert.assertFalse(itr.hasNext());
-
-  }
-
-  @Test
-  public void testStoreWithNoCtorArgs() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table junit_unparted");
-    String createTable = "create table junit_unparted(a int, b string) stored as RCFILE";
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-
-    int LOOP_SIZE = 3;
-    String[] input = new String[LOOP_SIZE*LOOP_SIZE];
-    int k = 0;
-    for(int i = 1; i <= LOOP_SIZE; i++) {
-      String si = i + "";
-      for(int j=1;j<=LOOP_SIZE;j++) {
-        input[k++] = si + "\t"+j;
-      }
-    }
-    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.setBatchOn();
-    server.registerQuery("A = load '"+ INPUT_FILE_NAME +"' as (a:int, b:chararray);");
-    server.registerQuery("store A into 'junit_unparted' using "+HCatStorer.class.getName()+"();");
-    server.executeBatch();
-
-    driver.run("select * from junit_unparted");
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-    driver.run("drop table junit_unparted");
-    Iterator<String> itr = res.iterator();
-    for(int i = 0; i < LOOP_SIZE*LOOP_SIZE; i++) {
-      Assert.assertEquals( input[i] ,itr.next());
-    }
-
-    Assert.assertFalse(itr.hasNext());
-
-  }
-
-  @Test
-  public void testEmptyStore() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table junit_unparted");
-    String createTable = "create table junit_unparted(a int, b string) stored as RCFILE";
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-
-    int LOOP_SIZE = 3;
-    String[] input = new String[LOOP_SIZE*LOOP_SIZE];
-    int k = 0;
-    for(int i = 1; i <= LOOP_SIZE; i++) {
-      String si = i + "";
-      for(int j=1;j<=LOOP_SIZE;j++) {
-        input[k++] = si + "\t"+j;
-      }
-    }
-    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.setBatchOn();
-    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
-    server.registerQuery("B = filter A by a > 100;");
-    server.registerQuery("store B into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('','a:int,b:chararray');");
-    server.executeBatch();
-
-    driver.run("select * from junit_unparted");
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-    driver.run("drop table junit_unparted");
-    Iterator<String> itr = res.iterator();
-    Assert.assertFalse(itr.hasNext());
-
-  }
-
-  @Test
-  public void testBagNStruct() throws IOException, CommandNeedRetryException{
-  driver.run("drop table junit_unparted");
-  String createTable = "create table junit_unparted(b string,a struct<a1:int>,  arr_of_struct array<string>, " +
-      "arr_of_struct2 array<struct<s1:string,s2:string>>,  arr_of_struct3 array<struct<s3:string>>) stored as RCFILE";
-  int retCode = driver.run(createTable).getResponseCode();
-  if(retCode != 0) {
-    throw new IOException("Failed to create table.");
-  }
-
-  String[] inputData = new String[]{"zookeeper\t(2)\t{(pig)}\t{(pnuts,hdfs)}\t{(hadoop),(hcat)}",
-      "chubby\t(2)\t{(sawzall)}\t{(bigtable,gfs)}\t{(mapreduce),(hcat)}"};
-
-  HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
-
-  PigServer server = new PigServer(ExecType.LOCAL);
-  server.setBatchOn();
-  server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (b:chararray, a:tuple(a1:int), arr_of_struct:bag{mytup:tuple(s1:chararray)}, arr_of_struct2:bag{mytup:tuple(s1:chararray,s2:chararray)}, arr_of_struct3:bag{t3:tuple(s3:chararray)});");
-  server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('','b:chararray, a:tuple(a1:int)," +
-      " arr_of_struct:bag{mytup:tuple(s1:chararray)}, arr_of_struct2:bag{mytup:tuple(s1:chararray,s2:chararray)}, arr_of_struct3:bag{t3:tuple(s3:chararray)}');");
-  server.executeBatch();
-
-  driver.run("select * from junit_unparted");
-  ArrayList<String> res = new ArrayList<String>();
-  driver.getResults(res);
-  driver.run("drop table junit_unparted");
-  Iterator<String> itr = res.iterator();
-  Assert.assertEquals("zookeeper\t{\"a1\":2}\t[\"pig\"]\t[{\"s1\":\"pnuts\",\"s2\":\"hdfs\"}]\t[{\"s3\":\"hadoop\"},{\"s3\":\"hcat\"}]", itr.next());
-  Assert.assertEquals("chubby\t{\"a1\":2}\t[\"sawzall\"]\t[{\"s1\":\"bigtable\",\"s2\":\"gfs\"}]\t[{\"s3\":\"mapreduce\"},{\"s3\":\"hcat\"}]",itr.next());
- Assert.assertFalse(itr.hasNext());
-
-  }
-
-  @Test
-  public void testStoreFuncAllSimpleTypes() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table junit_unparted");
-    String createTable = "create table junit_unparted(a int, b float, c double, d bigint, e string, f binary, g binary) stored as RCFILE";
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-
-    int i =0;
-    String[] input = new String[3];
-    input[i++]= "0\t\t\t\t\t\t"; //Empty values except first column
-    input[i++]= "\t" + i * 2.1f +"\t"+ i*1.1d + "\t" + i * 2L +"\t"+"lets hcat"+"\tbinary-data"; //First column empty
-    input[i++]= i + "\t" + i * 2.1f +"\t"+ i*1.1d + "\t" + i * 2L +"\t"+"lets hcat"+"\tbinary-data";
-
-    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.setBatchOn();
-    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:float, c:double, d:long, e:chararray, f:bytearray);");
-    //null gets stored into column g which is a binary field.
-    server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('','a:int, b:float, c:double, d:long, e:chararray,f:bytearray');");
-    server.executeBatch();
-
-
-    driver.run("select * from junit_unparted");
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-
-    Iterator<String> itr = res.iterator();
-    Assert.assertEquals( "0\tNULL\tNULL\tNULL\tNULL\tnull\tnull" ,itr.next());
-    Assert.assertEquals( "NULL\t4.2\t2.2\t4\tlets hcat\tbinary-data\tnull" ,itr.next());
-    Assert.assertEquals( "3\t6.2999997\t3.3000000000000003\t6\tlets hcat\tbinary-data\tnull",itr.next());
-    Assert.assertFalse(itr.hasNext());
-
-    server.registerQuery("B = load 'junit_unparted' using "+HCatLoader.class.getName()+";");
-    Iterator<Tuple> iter = server.openIterator("B");
-    int count = 0;
-    int num5nulls = 0;
-    while(iter.hasNext()){
-        Tuple t = iter.next();
-        if(t.get(5) == null){
-            num5nulls++;
-        }else {
-          Assert.assertTrue(t.get(5) instanceof DataByteArray);
-        }
-      Assert.assertNull(t.get(6));
-        count++;
-    }
-    Assert.assertEquals(3, count);
-    Assert.assertEquals(1, num5nulls);
-    driver.run("drop table junit_unparted");
-  }
-
-  @Test
-  public void testStoreFuncSimple() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table junit_unparted");
-    String createTable = "create table junit_unparted(a int, b string) stored as RCFILE";
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-
-    int LOOP_SIZE = 3;
-    String[] inputData = new String[LOOP_SIZE*LOOP_SIZE];
-    int k = 0;
-    for(int i = 1; i <= LOOP_SIZE; i++) {
-      String si = i + "";
-      for(int j=1;j<=LOOP_SIZE;j++) {
-        inputData[k++] = si + "\t"+j;
-      }
-    }
-    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.setBatchOn();
-    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
-    server.registerQuery("store A into 'default.junit_unparted' using "+HCatStorer.class.getName()+"('','a:int,b:chararray');");
-    server.executeBatch();
-
-    driver.run("select * from junit_unparted");
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-    driver.run("drop table junit_unparted");
-    Iterator<String> itr = res.iterator();
-    for(int i = 1; i <= LOOP_SIZE; i++) {
-      String si = i + "";
-      for(int j=1;j<=LOOP_SIZE;j++) {
-        Assert.assertEquals( si + "\t"+j,itr.next());
-      }
-    }
-   Assert.assertFalse(itr.hasNext());
-
-  }
-
-  @Test
-  public void testDynamicPartitioningMultiPartColsInDataPartialSpec() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table if exists employee");
-    String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , emp_gender STRING ) " +
-        " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS RCFILE";
-
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-
-    String[] inputData = {"111237\tKrishna\t01/01/1990\tM\tIN\tTN",
-                          "111238\tKalpana\t01/01/2000\tF\tIN\tKA",
-                          "111239\tSatya\t01/01/2001\tM\tIN\tKL",
-                          "111240\tKavya\t01/01/2002\tF\tIN\tAP"};
-
-    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
-    PigServer pig = new PigServer(ExecType.LOCAL);
-    pig.setBatchOn();
-    pig.registerQuery("A = LOAD '"+INPUT_FILE_NAME+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
-        "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
-    pig.registerQuery("IN = FILTER A BY emp_country == 'IN';");
-    pig.registerQuery("STORE IN INTO 'employee' USING "+HCatStorer.class.getName()+"('emp_country=IN');");
-    pig.executeBatch();
-    driver.run("select * from employee");
-    ArrayList<String> results = new ArrayList<String>();
-    driver.getResults(results);
-    Assert.assertEquals(4, results.size());
-    Collections.sort(results);
-    Assert.assertEquals(inputData[0], results.get(0));
-    Assert.assertEquals(inputData[1], results.get(1));
-    Assert.assertEquals(inputData[2], results.get(2));
-    Assert.assertEquals(inputData[3], results.get(3));
-    driver.run("drop table employee");
-  }
-
-  @Test
-  public void testDynamicPartitioningMultiPartColsInDataNoSpec() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table if exists employee");
-    String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , emp_gender STRING ) " +
-        " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS RCFILE";
-
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table.");
-    }
-
-    String[] inputData = {"111237\tKrishna\t01/01/1990\tM\tIN\tTN",
-                          "111238\tKalpana\t01/01/2000\tF\tIN\tKA",
-                          "111239\tSatya\t01/01/2001\tM\tIN\tKL",
-                          "111240\tKavya\t01/01/2002\tF\tIN\tAP"};
-
-    HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
-    PigServer pig = new PigServer(ExecType.LOCAL);
-    pig.setBatchOn();
-    pig.registerQuery("A = LOAD '"+INPUT_FILE_NAME+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
-        "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
-    pig.registerQuery("IN = FILTER A BY emp_country == 'IN';");
-    pig.registerQuery("STORE IN INTO 'employee' USING "+HCatStorer.class.getName()+"();");
-    pig.executeBatch();
-    driver.run("select * from employee");
-    ArrayList<String> results = new ArrayList<String>();
-    driver.getResults(results);
-    Assert.assertEquals(4, results.size());
-    Collections.sort(results);
-    Assert.assertEquals(inputData[0], results.get(0));
-    Assert.assertEquals(inputData[1], results.get(1));
-    Assert.assertEquals(inputData[2], results.get(2));
-    Assert.assertEquals(inputData[3], results.get(3));
-    driver.run("drop table employee");
-  }
+    private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
 
     @Test
-    public void testDynamicPartitioningMultiPartColsNoDataInDataNoSpec() throws IOException, CommandNeedRetryException{
+    public void testPartColsInData() throws IOException, CommandNeedRetryException {
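+        // partition column 'b' appears in the loaded data and is also fixed to 'b=1' in the HCatStorer spec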
 
-      driver.run("drop table if exists employee");
-      String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , emp_gender STRING ) " +
-          " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS RCFILE";
+        driver.run("drop table junit_unparted");
+        String createTable = "create table junit_unparted(a int) partitioned by (b string) stored as RCFILE";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+        int LOOP_SIZE = 11;
+        String[] input = new String[LOOP_SIZE];
+        for (int i = 0; i < LOOP_SIZE; i++) {
+            input[i] = i + "\t1";
+        }
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, b:chararray);");
+        server.registerQuery("store A into 'default.junit_unparted' using " + HCatStorer.class.getName() + "('b=1');");
+        server.registerQuery("B = load 'default.junit_unparted' using " + HCatLoader.class.getName() + "();");
+        Iterator<Tuple> itr = server.openIterator("B");
 
-      int retCode = driver.run(createTable).getResponseCode();
-      if(retCode != 0) {
-        throw new IOException("Failed to create table.");
-      }
+        int i = 0;
 
-      String[] inputData = {};
-      HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+        while (itr.hasNext()) {
+            Tuple t = itr.next();
+            Assert.assertEquals(2, t.size());
+            Assert.assertEquals(t.get(0), i);
+            Assert.assertEquals(t.get(1), "1");
+            i++;
+        }
 
-      PigServer pig = new PigServer(ExecType.LOCAL);
-      pig.setBatchOn();
-      pig.registerQuery("A = LOAD '"+INPUT_FILE_NAME+"' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
-          "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
-      pig.registerQuery("IN = FILTER A BY emp_country == 'IN';");
-      pig.registerQuery("STORE IN INTO 'employee' USING "+HCatStorer.class.getName()+"();");
-      pig.executeBatch();
-      driver.run("select * from employee");
-      ArrayList<String> results = new ArrayList<String>();
-      driver.getResults(results);
-      Assert.assertEquals(0, results.size());
-      driver.run("drop table employee");
+        Assert.assertFalse(itr.hasNext());
+        Assert.assertEquals(11, i);
+    }
+
+    @Test
+    public void testMultiPartColsInData() throws IOException, CommandNeedRetryException {
+
+        driver.run("drop table employee");
+        String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , emp_gender STRING ) " +
+            " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS RCFILE";
+
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+
+        String[] inputData = {"111237\tKrishna\t01/01/1990\tM\tIN\tTN",
+            "111238\tKalpana\t01/01/2000\tF\tIN\tKA",
+            "111239\tSatya\t01/01/2001\tM\tIN\tKL",
+            "111240\tKavya\t01/01/2002\tF\tIN\tAP"};
+
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+        PigServer pig = new PigServer(ExecType.LOCAL);
+        pig.setBatchOn();
+        pig.registerQuery("A = LOAD '" + INPUT_FILE_NAME + "' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
+            "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
+        pig.registerQuery("TN = FILTER A BY emp_state == 'TN';");
+        pig.registerQuery("KA = FILTER A BY emp_state == 'KA';");
+        pig.registerQuery("KL = FILTER A BY emp_state == 'KL';");
+        pig.registerQuery("AP = FILTER A BY emp_state == 'AP';");
+        pig.registerQuery("STORE TN INTO 'employee' USING " + HCatStorer.class.getName() + "('emp_country=IN,emp_state=TN');");
+        pig.registerQuery("STORE KA INTO 'employee' USING " + HCatStorer.class.getName() + "('emp_country=IN,emp_state=KA');");
+        pig.registerQuery("STORE KL INTO 'employee' USING " + HCatStorer.class.getName() + "('emp_country=IN,emp_state=KL');");
+        pig.registerQuery("STORE AP INTO 'employee' USING " + HCatStorer.class.getName() + "('emp_country=IN,emp_state=AP');");
+        pig.executeBatch();
+        driver.run("select * from employee");
+        ArrayList<String> results = new ArrayList<String>();
+        driver.getResults(results);
+        Assert.assertEquals(4, results.size());
+        Collections.sort(results);
+        Assert.assertEquals(inputData[0], results.get(0));
+        Assert.assertEquals(inputData[1], results.get(1));
+        Assert.assertEquals(inputData[2], results.get(2));
+        Assert.assertEquals(inputData[3], results.get(3));
+        driver.run("drop table employee");
+    }
+
+    @Test
+    public void testStoreInPartiitonedTbl() throws IOException, CommandNeedRetryException {
+
+        driver.run("drop table junit_unparted");
+        String createTable = "create table junit_unparted(a int) partitioned by (b string) stored as RCFILE";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+        int LOOP_SIZE = 11;
+        String[] input = new String[LOOP_SIZE];
+        for (int i = 0; i < LOOP_SIZE; i++) {
+            input[i] = i + "";
+        }
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int);");
+        server.registerQuery("store A into 'default.junit_unparted' using " + HCatStorer.class.getName() + "('b=1');");
+        server.registerQuery("B = load 'default.junit_unparted' using " + HCatLoader.class.getName() + "();");
+        Iterator<Tuple> itr = server.openIterator("B");
+
+        int i = 0;
+
+        while (itr.hasNext()) {
+            Tuple t = itr.next();
+            Assert.assertEquals(2, t.size());
+            Assert.assertEquals(t.get(0), i);
+            Assert.assertEquals(t.get(1), "1");
+            i++;
+        }
+
+        Assert.assertFalse(itr.hasNext());
+        Assert.assertEquals(11, i);
+    }
+
+    @Test
+    public void testNoAlias() throws IOException, CommandNeedRetryException {
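+        // expect HCatStorer to reject an unnamed generated column and then an uppercase alias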
+        driver.run("drop table junit_parted");
+        String createTable = "create table junit_parted(a int, b string) partitioned by (ds string) stored as RCFILE";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+        PigServer server = new PigServer(ExecType.LOCAL);
+        boolean errCaught = false;
+        try {
+            server.setBatchOn();
+            server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, b:chararray);");
+            server.registerQuery("B = foreach A generate a+10, b;");
+            server.registerQuery("store B into 'junit_parted' using " + HCatStorer.class.getName() + "('ds=20100101');");
+            server.executeBatch();
+        } catch (PigException fe) {
+            PigException pe = LogUtils.getPigException(fe);
+            Assert.assertTrue(pe instanceof FrontendException);
+            Assert.assertEquals(PigHCatUtil.PIG_EXCEPTION_CODE, pe.getErrorCode());
+            Assert.assertTrue(pe.getMessage().contains("Column name for a field is not specified. Please provide the full schema as an argument to HCatStorer."));
+            errCaught = true;
+        }
+        Assert.assertTrue(errCaught);
+        errCaught = false;
+        try {
+            server.setBatchOn();
+            server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, B:chararray);");
+            server.registerQuery("B = foreach A generate a, B;");
+            server.registerQuery("store B into 'junit_parted' using " + HCatStorer.class.getName() + "('ds=20100101');");
+            server.executeBatch();
+        } catch (PigException fe) {
+            PigException pe = LogUtils.getPigException(fe);
+            Assert.assertTrue(pe instanceof FrontendException);
+            Assert.assertEquals(PigHCatUtil.PIG_EXCEPTION_CODE, pe.getErrorCode());
+            Assert.assertTrue(pe.getMessage().contains("Column names should all be in lowercase. Invalid name found: B"));
+            errCaught = true;
+        }
+        driver.run("drop table junit_parted");
+        Assert.assertTrue(errCaught);
+    }
+
+    @Test
+    public void testStoreMultiTables() throws IOException, CommandNeedRetryException {
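+        // a single Pig batch splits the input with two filters and stores into two separate tables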
+
+        driver.run("drop table junit_unparted");
+        String createTable = "create table junit_unparted(a int, b string) stored as RCFILE";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+        driver.run("drop table junit_unparted2");
+        createTable = "create table junit_unparted2(a int, b string) stored as RCFILE";
+        retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+
+        int LOOP_SIZE = 3;
+        String[] input = new String[LOOP_SIZE * LOOP_SIZE];
+        int k = 0;
+        for (int i = 1; i <= LOOP_SIZE; i++) {
+            String si = i + "";
+            for (int j = 1; j <= LOOP_SIZE; j++) {
+                input[k++] = si + "\t" + j;
+            }
+        }
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, b:chararray);");
+        server.registerQuery("B = filter A by a < 2;");
+        server.registerQuery("store B into 'junit_unparted' using " + HCatStorer.class.getName() + "();");
+        server.registerQuery("C = filter A by a >= 2;");
+        server.registerQuery("store C into 'junit_unparted2' using " + HCatStorer.class.getName() + "();");
+        server.executeBatch();
+
+        driver.run("select * from junit_unparted");
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        driver.run("select * from junit_unparted2");
+        ArrayList<String> res2 = new ArrayList<String>();
+        driver.getResults(res2);
+
+        res.addAll(res2);
+        driver.run("drop table junit_unparted");
+        driver.run("drop table junit_unparted2");
+
+        Iterator<String> itr = res.iterator();
+        for (int i = 0; i < LOOP_SIZE * LOOP_SIZE; i++) {
+            Assert.assertEquals(input[i], itr.next());
+        }
+
+        Assert.assertFalse(itr.hasNext());
+
+    }
+
+    @Test
+    public void testStoreWithNoSchema() throws IOException, CommandNeedRetryException {
+
+        driver.run("drop table junit_unparted");
+        String createTable = "create table junit_unparted(a int, b string) stored as RCFILE";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+
+        int LOOP_SIZE = 3;
+        String[] input = new String[LOOP_SIZE * LOOP_SIZE];
+        int k = 0;
+        for (int i = 1; i <= LOOP_SIZE; i++) {
+            String si = i + "";
+            for (int j = 1; j <= LOOP_SIZE; j++) {
+                input[k++] = si + "\t" + j;
+            }
+        }
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, b:chararray);");
+        server.registerQuery("store A into 'default.junit_unparted' using " + HCatStorer.class.getName() + "('');");
+        server.executeBatch();
+
+        driver.run("select * from junit_unparted");
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        driver.run("drop table junit_unparted");
+        Iterator<String> itr = res.iterator();
+        for (int i = 0; i < LOOP_SIZE * LOOP_SIZE; i++) {
+            Assert.assertEquals(input[i], itr.next());
+        }
+
+        Assert.assertFalse(itr.hasNext());
+
+    }
+
+    @Test
+    public void testStoreWithNoCtorArgs() throws IOException, CommandNeedRetryException {
+
+        driver.run("drop table junit_unparted");
+        String createTable = "create table junit_unparted(a int, b string) stored as RCFILE";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+
+        int LOOP_SIZE = 3;
+        String[] input = new String[LOOP_SIZE * LOOP_SIZE];
+        int k = 0;
+        for (int i = 1; i <= LOOP_SIZE; i++) {
+            String si = i + "";
+            for (int j = 1; j <= LOOP_SIZE; j++) {
+                input[k++] = si + "\t" + j;
+            }
+        }
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, b:chararray);");
+        server.registerQuery("store A into 'junit_unparted' using " + HCatStorer.class.getName() + "();");
+        server.executeBatch();
+
+        driver.run("select * from junit_unparted");
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        driver.run("drop table junit_unparted");
+        Iterator<String> itr = res.iterator();
+        for (int i = 0; i < LOOP_SIZE * LOOP_SIZE; i++) {
+            Assert.assertEquals(input[i], itr.next());
+        }
+
+        Assert.assertFalse(itr.hasNext());
+
+    }
+
+    @Test
+    public void testEmptyStore() throws IOException, CommandNeedRetryException {
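+        // the filter (a > 100) matches no rows, so the stored table must come back empty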
+
+        driver.run("drop table junit_unparted");
+        String createTable = "create table junit_unparted(a int, b string) stored as RCFILE";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+
+        int LOOP_SIZE = 3;
+        String[] input = new String[LOOP_SIZE * LOOP_SIZE];
+        int k = 0;
+        for (int i = 1; i <= LOOP_SIZE; i++) {
+            String si = i + "";
+            for (int j = 1; j <= LOOP_SIZE; j++) {
+                input[k++] = si + "\t" + j;
+            }
+        }
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, b:chararray);");
+        server.registerQuery("B = filter A by a > 100;");
+        server.registerQuery("store B into 'default.junit_unparted' using " + HCatStorer.class.getName() + "('','a:int,b:chararray');");
+        server.executeBatch();
+
+        driver.run("select * from junit_unparted");
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        driver.run("drop table junit_unparted");
+        Iterator<String> itr = res.iterator();
+        Assert.assertFalse(itr.hasNext());
+
+    }
+
+    @Test
+    public void testBagNStruct() throws IOException, CommandNeedRetryException {
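+        // Pig tuples and bags should map to Hive structs and arrays when stored through HCatStorer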
+        driver.run("drop table junit_unparted");
+        String createTable = "create table junit_unparted(b string,a struct<a1:int>,  arr_of_struct array<string>, " +
+            "arr_of_struct2 array<struct<s1:string,s2:string>>,  arr_of_struct3 array<struct<s3:string>>) stored as RCFILE";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+
+        String[] inputData = new String[]{"zookeeper\t(2)\t{(pig)}\t{(pnuts,hdfs)}\t{(hadoop),(hcat)}",
+            "chubby\t(2)\t{(sawzall)}\t{(bigtable,gfs)}\t{(mapreduce),(hcat)}"};
+
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (b:chararray, a:tuple(a1:int), arr_of_struct:bag{mytup:tuple(s1:chararray)}, arr_of_struct2:bag{mytup:tuple(s1:chararray,s2:chararray)}, arr_of_struct3:bag{t3:tuple(s3:chararray)});");
+        server.registerQuery("store A into 'default.junit_unparted' using " + HCatStorer.class.getName() + "('','b:chararray, a:tuple(a1:int)," +
+            " arr_of_struct:bag{mytup:tuple(s1:chararray)}, arr_of_struct2:bag{mytup:tuple(s1:chararray,s2:chararray)}, arr_of_struct3:bag{t3:tuple(s3:chararray)}');");
+        server.executeBatch();
+
+        driver.run("select * from junit_unparted");
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        driver.run("drop table junit_unparted");
+        Iterator<String> itr = res.iterator();
+        Assert.assertEquals("zookeeper\t{\"a1\":2}\t[\"pig\"]\t[{\"s1\":\"pnuts\",\"s2\":\"hdfs\"}]\t[{\"s3\":\"hadoop\"},{\"s3\":\"hcat\"}]", itr.next());
+        Assert.assertEquals("chubby\t{\"a1\":2}\t[\"sawzall\"]\t[{\"s1\":\"bigtable\",\"s2\":\"gfs\"}]\t[{\"s3\":\"mapreduce\"},{\"s3\":\"hcat\"}]", itr.next());
+        Assert.assertFalse(itr.hasNext());
+
+    }
+
+    @Test
+    public void testStoreFuncAllSimpleTypes() throws IOException, CommandNeedRetryException {
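+        // covers empty/NULL handling for int, float, double, bigint, string and binary; column g is never written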
+
+        driver.run("drop table junit_unparted");
+        String createTable = "create table junit_unparted(a int, b float, c double, d bigint, e string, f binary, g binary) stored as RCFILE";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+
+        int i = 0;
+        String[] input = new String[3];
+        input[i++] = "0\t\t\t\t\t\t"; //Empty values except first column
+        input[i++] = "\t" + i * 2.1f + "\t" + i * 1.1d + "\t" + i * 2L + "\t" + "lets hcat" + "\tbinary-data"; //First column empty
+        input[i++] = i + "\t" + i * 2.1f + "\t" + i * 1.1d + "\t" + i * 2L + "\t" + "lets hcat" + "\tbinary-data";
+
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, input);
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, b:float, c:double, d:long, e:chararray, f:bytearray);");
+        // null gets stored into column g, which is a binary field.
+        server.registerQuery("store A into 'default.junit_unparted' using " + HCatStorer.class.getName() + "('','a:int, b:float, c:double, d:long, e:chararray,f:bytearray');");
+        server.executeBatch();
+
+
+        driver.run("select * from junit_unparted");
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+
+        Iterator<String> itr = res.iterator();
+        Assert.assertEquals("0\tNULL\tNULL\tNULL\tNULL\tnull\tnull", itr.next());
+        Assert.assertEquals("NULL\t4.2\t2.2\t4\tlets hcat\tbinary-data\tnull", itr.next());
+        Assert.assertEquals("3\t6.2999997\t3.3000000000000003\t6\tlets hcat\tbinary-data\tnull", itr.next());
+        Assert.assertFalse(itr.hasNext());
+
+        server.registerQuery("B = load 'junit_unparted' using " + HCatLoader.class.getName() + ";");
+        Iterator<Tuple> iter = server.openIterator("B");
+        int count = 0;
+        int num5nulls = 0;
+        while (iter.hasNext()) {
+            Tuple t = iter.next();
+            if (t.get(5) == null) {
+                num5nulls++;
+            } else {
+                Assert.assertTrue(t.get(5) instanceof DataByteArray);
+            }
+            Assert.assertNull(t.get(6));
+            count++;
+        }
+        Assert.assertEquals(3, count);
+        Assert.assertEquals(1, num5nulls);
+        driver.run("drop table junit_unparted");
+    }
+
+    @Test
+    public void testStoreFuncSimple() throws IOException, CommandNeedRetryException {
+
+        driver.run("drop table junit_unparted");
+        String createTable = "create table junit_unparted(a int, b string) stored as RCFILE";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+
+        int LOOP_SIZE = 3;
+        String[] inputData = new String[LOOP_SIZE * LOOP_SIZE];
+        int k = 0;
+        for (int i = 1; i <= LOOP_SIZE; i++) {
+            String si = i + "";
+            for (int j = 1; j <= LOOP_SIZE; j++) {
+                inputData[k++] = si + "\t" + j;
+            }
+        }
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, b:chararray);");
+        server.registerQuery("store A into 'default.junit_unparted' using " + HCatStorer.class.getName() + "('','a:int,b:chararray');");
+        server.executeBatch();
+
+        driver.run("select * from junit_unparted");
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        driver.run("drop table junit_unparted");
+        Iterator<String> itr = res.iterator();
+        for (int i = 1; i <= LOOP_SIZE; i++) {
+            String si = i + "";
+            for (int j = 1; j <= LOOP_SIZE; j++) {
+                Assert.assertEquals(si + "\t" + j, itr.next());
+            }
+        }
+        Assert.assertFalse(itr.hasNext());
+
+    }
+
+    @Test
+    public void testDynamicPartitioningMultiPartColsInDataPartialSpec() throws IOException, CommandNeedRetryException {
+
+        driver.run("drop table if exists employee");
+        String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , emp_gender STRING ) " +
+            " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS RCFILE";
+
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+
+        String[] inputData = {"111237\tKrishna\t01/01/1990\tM\tIN\tTN",
+            "111238\tKalpana\t01/01/2000\tF\tIN\tKA",
+            "111239\tSatya\t01/01/2001\tM\tIN\tKL",
+            "111240\tKavya\t01/01/2002\tF\tIN\tAP"};
+
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+        PigServer pig = new PigServer(ExecType.LOCAL);
+        pig.setBatchOn();
+        pig.registerQuery("A = LOAD '" + INPUT_FILE_NAME + "' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
+            "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
+        pig.registerQuery("IN = FILTER A BY emp_country == 'IN';");
+        pig.registerQuery("STORE IN INTO 'employee' USING " + HCatStorer.class.getName() + "('emp_country=IN');");
+        pig.executeBatch();
+        driver.run("select * from employee");
+        ArrayList<String> results = new ArrayList<String>();
+        driver.getResults(results);
+        Assert.assertEquals(4, results.size());
+        Collections.sort(results);
+        Assert.assertEquals(inputData[0], results.get(0));
+        Assert.assertEquals(inputData[1], results.get(1));
+        Assert.assertEquals(inputData[2], results.get(2));
+        Assert.assertEquals(inputData[3], results.get(3));
+        driver.run("drop table employee");
+    }
+
+    @Test
+    public void testDynamicPartitioningMultiPartColsInDataNoSpec() throws IOException, CommandNeedRetryException {
+
+        driver.run("drop table if exists employee");
+        String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , emp_gender STRING ) " +
+            " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS RCFILE";
+
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+
+        String[] inputData = {"111237\tKrishna\t01/01/1990\tM\tIN\tTN",
+            "111238\tKalpana\t01/01/2000\tF\tIN\tKA",
+            "111239\tSatya\t01/01/2001\tM\tIN\tKL",
+            "111240\tKavya\t01/01/2002\tF\tIN\tAP"};
+
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+        PigServer pig = new PigServer(ExecType.LOCAL);
+        pig.setBatchOn();
+        pig.registerQuery("A = LOAD '" + INPUT_FILE_NAME + "' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
+            "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
+        pig.registerQuery("IN = FILTER A BY emp_country == 'IN';");
+        pig.registerQuery("STORE IN INTO 'employee' USING " + HCatStorer.class.getName() + "();");
+        pig.executeBatch();
+        driver.run("select * from employee");
+        ArrayList<String> results = new ArrayList<String>();
+        driver.getResults(results);
+        Assert.assertEquals(4, results.size());
+        Collections.sort(results);
+        Assert.assertEquals(inputData[0], results.get(0));
+        Assert.assertEquals(inputData[1], results.get(1));
+        Assert.assertEquals(inputData[2], results.get(2));
+        Assert.assertEquals(inputData[3], results.get(3));
+        driver.run("drop table employee");
+    }
+
+    @Test
+    public void testDynamicPartitioningMultiPartColsNoDataInDataNoSpec() throws IOException, CommandNeedRetryException {
+
+        driver.run("drop table if exists employee");
+        String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , emp_gender STRING ) " +
+            " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS RCFILE";
+
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table.");
+        }
+
+        String[] inputData = {};
+        HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, inputData);
+
+        PigServer pig = new PigServer(ExecType.LOCAL);
+        pig.setBatchOn();
+        pig.registerQuery("A = LOAD '" + INPUT_FILE_NAME + "' USING PigStorage() AS (emp_id:int,emp_name:chararray,emp_start_date:chararray," +
+            "emp_gender:chararray,emp_country:chararray,emp_state:chararray);");
+        pig.registerQuery("IN = FILTER A BY emp_country == 'IN';");
+        pig.registerQuery("STORE IN INTO 'employee' USING " + HCatStorer.class.getName() + "();");
+        pig.executeBatch();
+        driver.run("select * from employee");
+        ArrayList<String> results = new ArrayList<String>();
+        driver.getResults(results);
+        Assert.assertEquals(0, results.size());
+        driver.run("drop table employee");
     }
 }
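
Aside for reviewers: the re-indented TestHCatStorer cases above all follow the same load/store/verify pattern. A minimal standalone sketch of that pattern is given below; it is illustrative only and not part of this patch. It assumes Pig and the HCatalog jars are on the classpath, a reachable Hive metastore, and an existing table default.junit_unparted(a int, b string); the file path and table name are placeholders.

import java.io.IOException;

import org.apache.pig.ExecType;
import org.apache.pig.PigServer;

// Sketch only: store a tab-delimited local file into an existing HCatalog
// table through HCatStorer, mirroring testStoreFuncSimple above.
public class HCatStorerSketch {
    public static void main(String[] args) throws IOException {
        PigServer server = new PigServer(ExecType.LOCAL);
        server.setBatchOn();
        // input.data is assumed to contain lines like "1\tone"
        server.registerQuery("A = load 'input.data' as (a:int, b:chararray);");
        server.registerQuery("store A into 'default.junit_unparted' using "
            + "org.apache.hcatalog.pig.HCatStorer('', 'a:int,b:chararray');");
        server.executeBatch();
    }
}
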
diff --git a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatStorerMulti.java b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatStorerMulti.java
index 7dddad9..98fe887 100644
--- a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatStorerMulti.java
+++ b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatStorerMulti.java
@@ -37,159 +37,160 @@
 import org.apache.pig.PigServer;
 
 public class TestHCatStorerMulti extends TestCase {
-  private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
-      "/build/test/data/" + TestHCatStorerMulti.class.getCanonicalName();
-  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
-  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
+    private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+        "/build/test/data/" + TestHCatStorerMulti.class.getCanonicalName();
+    private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+    private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
 
-  private static final String BASIC_TABLE = "junit_unparted_basic";
-  private static final String PARTITIONED_TABLE = "junit_parted_basic";
-  private static Driver driver;
+    private static final String BASIC_TABLE = "junit_unparted_basic";
+    private static final String PARTITIONED_TABLE = "junit_parted_basic";
+    private static Driver driver;
 
-  private static Map<Integer,Pair<Integer,String>> basicInputData;
+    private static Map<Integer, Pair<Integer, String>> basicInputData;
 
-  private void dropTable(String tablename) throws IOException, CommandNeedRetryException{
-    driver.run("drop table "+tablename);
-  }
-  private void createTable(String tablename, String schema, String partitionedBy) throws IOException, CommandNeedRetryException{
-    String createTable;
-    createTable = "create table "+tablename+"("+schema+") ";
-    if ((partitionedBy != null)&&(!partitionedBy.trim().isEmpty())){
-      createTable = createTable + "partitioned by ("+partitionedBy+") ";
-    }
-    createTable = createTable + "stored as RCFILE tblproperties('hcat.isd'='org.apache.hcatalog.rcfile.RCFileInputDriver'," +
-    "'hcat.osd'='org.apache.hcatalog.rcfile.RCFileOutputDriver') ";
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table. ["+createTable+"], return code from hive driver : ["+retCode+"]");
-    }
-  }
-
-  private void createTable(String tablename, String schema) throws IOException, CommandNeedRetryException{
-    createTable(tablename,schema,null);
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-    if (driver == null){
-      HiveConf hiveConf = new HiveConf(this.getClass());
-      hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-      hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-      hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-      hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
-      driver = new Driver(hiveConf);
-      SessionState.start(new CliSessionState(hiveConf));
+    private void dropTable(String tablename) throws IOException, CommandNeedRetryException {
+        driver.run("drop table " + tablename);
     }
 
-    cleanup();
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    cleanup();
-  }
-
-  public void testStoreBasicTable() throws Exception {
-
-
-    createTable(BASIC_TABLE,"a int, b string");
-
-    populateBasicFile();
-
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.setBatchOn();
-    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
-    server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.hcatalog.pig.HCatStorer();");
-
-    server.executeBatch();
-
-    driver.run("select * from "+BASIC_TABLE);
-    ArrayList<String> unpartitionedTableValuesReadFromHiveDriver = new ArrayList<String>();
-    driver.getResults(unpartitionedTableValuesReadFromHiveDriver);
-    assertEquals(basicInputData.size(),unpartitionedTableValuesReadFromHiveDriver.size());
-  }
-
-  public void testStorePartitionedTable() throws Exception {
-    createTable(PARTITIONED_TABLE,"a int, b string","bkt string");
-
-    populateBasicFile();
-
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.setBatchOn();
-    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
-
-    server.registerQuery("B2 = filter A by a < 2;");
-    server.registerQuery("store B2 into '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatStorer('bkt=0');");
-    server.registerQuery("C2 = filter A by a >= 2;");
-    server.registerQuery("store C2 into '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatStorer('bkt=1');");
-
-    server.executeBatch();
-
-    driver.run("select * from "+PARTITIONED_TABLE);
-    ArrayList<String> partitionedTableValuesReadFromHiveDriver = new ArrayList<String>();
-    driver.getResults(partitionedTableValuesReadFromHiveDriver);
-    assertEquals(basicInputData.size(),partitionedTableValuesReadFromHiveDriver.size());
-  }
-
-  public void testStoreTableMulti() throws Exception {
-
-
-    createTable(BASIC_TABLE,"a int, b string");
-    createTable(PARTITIONED_TABLE,"a int, b string","bkt string");
-
-    populateBasicFile();
-
-    PigServer server = new PigServer(ExecType.LOCAL);
-    server.setBatchOn();
-    server.registerQuery("A = load '"+INPUT_FILE_NAME+"' as (a:int, b:chararray);");
-    server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.hcatalog.pig.HCatStorer();");
-
-    server.registerQuery("B2 = filter A by a < 2;");
-    server.registerQuery("store B2 into '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatStorer('bkt=0');");
-    server.registerQuery("C2 = filter A by a >= 2;");
-    server.registerQuery("store C2 into '"+PARTITIONED_TABLE+"' using org.apache.hcatalog.pig.HCatStorer('bkt=1');");
-
-    server.executeBatch();
-
-    driver.run("select * from "+BASIC_TABLE);
-    ArrayList<String> unpartitionedTableValuesReadFromHiveDriver = new ArrayList<String>();
-    driver.getResults(unpartitionedTableValuesReadFromHiveDriver);
-    driver.run("select * from "+PARTITIONED_TABLE);
-    ArrayList<String> partitionedTableValuesReadFromHiveDriver = new ArrayList<String>();
-    driver.getResults(partitionedTableValuesReadFromHiveDriver);
-    assertEquals(basicInputData.size(),unpartitionedTableValuesReadFromHiveDriver.size());
-    assertEquals(basicInputData.size(),partitionedTableValuesReadFromHiveDriver.size());
-  }
-
-  private void populateBasicFile() throws IOException {
-    int LOOP_SIZE = 3;
-    String[] input = new String[LOOP_SIZE*LOOP_SIZE];
-    basicInputData = new HashMap<Integer,Pair<Integer,String>>();
-    int k = 0;
-    File file = new File(INPUT_FILE_NAME);
-    file.deleteOnExit();
-    FileWriter writer = new FileWriter(file);
-    for(int i = 1; i <= LOOP_SIZE; i++) {
-      String si = i + "";
-      for(int j=1;j<=LOOP_SIZE;j++) {
-        String sj = "S"+j+"S";
-        input[k] = si + "\t" + sj;
-        basicInputData.put(k, new Pair<Integer,String>(i,sj));
-        writer.write(input[k] + "\n");
-        k++;
-      }
+    private void createTable(String tablename, String schema, String partitionedBy) throws IOException, CommandNeedRetryException {
+        String createTable;
+        createTable = "create table " + tablename + "(" + schema + ") ";
+        if ((partitionedBy != null) && (!partitionedBy.trim().isEmpty())) {
+            createTable = createTable + "partitioned by (" + partitionedBy + ") ";
+        }
+        createTable = createTable + "stored as RCFILE tblproperties('hcat.isd'='org.apache.hcatalog.rcfile.RCFileInputDriver'," +
+            "'hcat.osd'='org.apache.hcatalog.rcfile.RCFileOutputDriver') ";
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table. [" + createTable + "], return code from hive driver : [" + retCode + "]");
+        }
     }
-    writer.close();
-  }
 
-  private void cleanup() throws IOException, CommandNeedRetryException {
-    File f = new File(TEST_WAREHOUSE_DIR);
-    if (f.exists()) {
-      FileUtil.fullyDelete(f);
+    private void createTable(String tablename, String schema) throws IOException, CommandNeedRetryException {
+        createTable(tablename, schema, null);
     }
-    new File(TEST_WAREHOUSE_DIR).mkdirs();
 
-    dropTable(BASIC_TABLE);
-    dropTable(PARTITIONED_TABLE);
-  }
+    @Override
+    protected void setUp() throws Exception {
+        if (driver == null) {
+            HiveConf hiveConf = new HiveConf(this.getClass());
+            hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+            hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+            hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+            hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
+            driver = new Driver(hiveConf);
+            SessionState.start(new CliSessionState(hiveConf));
+        }
+
+        cleanup();
+    }
+
+    @Override
+    protected void tearDown() throws Exception {
+        cleanup();
+    }
+
+    public void testStoreBasicTable() throws Exception {
+
+
+        createTable(BASIC_TABLE, "a int, b string");
+
+        populateBasicFile();
+
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, b:chararray);");
+        server.registerQuery("store A into '" + BASIC_TABLE + "' using org.apache.hcatalog.pig.HCatStorer();");
+
+        server.executeBatch();
+
+        driver.run("select * from " + BASIC_TABLE);
+        ArrayList<String> unpartitionedTableValuesReadFromHiveDriver = new ArrayList<String>();
+        driver.getResults(unpartitionedTableValuesReadFromHiveDriver);
+        assertEquals(basicInputData.size(), unpartitionedTableValuesReadFromHiveDriver.size());
+    }
+
+    public void testStorePartitionedTable() throws Exception {
+        createTable(PARTITIONED_TABLE, "a int, b string", "bkt string");
+
+        populateBasicFile();
+
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, b:chararray);");
+
+        server.registerQuery("B2 = filter A by a < 2;");
+        server.registerQuery("store B2 into '" + PARTITIONED_TABLE + "' using org.apache.hcatalog.pig.HCatStorer('bkt=0');");
+        server.registerQuery("C2 = filter A by a >= 2;");
+        server.registerQuery("store C2 into '" + PARTITIONED_TABLE + "' using org.apache.hcatalog.pig.HCatStorer('bkt=1');");
+
+        server.executeBatch();
+
+        driver.run("select * from " + PARTITIONED_TABLE);
+        ArrayList<String> partitionedTableValuesReadFromHiveDriver = new ArrayList<String>();
+        driver.getResults(partitionedTableValuesReadFromHiveDriver);
+        assertEquals(basicInputData.size(), partitionedTableValuesReadFromHiveDriver.size());
+    }
+
+    public void testStoreTableMulti() throws Exception {
+
+
+        createTable(BASIC_TABLE, "a int, b string");
+        createTable(PARTITIONED_TABLE, "a int, b string", "bkt string");
+
+        populateBasicFile();
+
+        PigServer server = new PigServer(ExecType.LOCAL);
+        server.setBatchOn();
+        server.registerQuery("A = load '" + INPUT_FILE_NAME + "' as (a:int, b:chararray);");
+        server.registerQuery("store A into '" + BASIC_TABLE + "' using org.apache.hcatalog.pig.HCatStorer();");
+
+        server.registerQuery("B2 = filter A by a < 2;");
+        server.registerQuery("store B2 into '" + PARTITIONED_TABLE + "' using org.apache.hcatalog.pig.HCatStorer('bkt=0');");
+        server.registerQuery("C2 = filter A by a >= 2;");
+        server.registerQuery("store C2 into '" + PARTITIONED_TABLE + "' using org.apache.hcatalog.pig.HCatStorer('bkt=1');");
+
+        server.executeBatch();
+
+        driver.run("select * from " + BASIC_TABLE);
+        ArrayList<String> unpartitionedTableValuesReadFromHiveDriver = new ArrayList<String>();
+        driver.getResults(unpartitionedTableValuesReadFromHiveDriver);
+        driver.run("select * from " + PARTITIONED_TABLE);
+        ArrayList<String> partitionedTableValuesReadFromHiveDriver = new ArrayList<String>();
+        driver.getResults(partitionedTableValuesReadFromHiveDriver);
+        assertEquals(basicInputData.size(), unpartitionedTableValuesReadFromHiveDriver.size());
+        assertEquals(basicInputData.size(), partitionedTableValuesReadFromHiveDriver.size());
+    }
+
+    private void populateBasicFile() throws IOException {
+        int LOOP_SIZE = 3;
+        String[] input = new String[LOOP_SIZE * LOOP_SIZE];
+        basicInputData = new HashMap<Integer, Pair<Integer, String>>();
+        int k = 0;
+        File file = new File(INPUT_FILE_NAME);
+        file.deleteOnExit();
+        FileWriter writer = new FileWriter(file);
+        for (int i = 1; i <= LOOP_SIZE; i++) {
+            String si = i + "";
+            for (int j = 1; j <= LOOP_SIZE; j++) {
+                String sj = "S" + j + "S";
+                input[k] = si + "\t" + sj;
+                basicInputData.put(k, new Pair<Integer, String>(i, sj));
+                writer.write(input[k] + "\n");
+                k++;
+            }
+        }
+        writer.close();
+    }
+
+    private void cleanup() throws IOException, CommandNeedRetryException {
+        File f = new File(TEST_WAREHOUSE_DIR);
+        if (f.exists()) {
+            FileUtil.fullyDelete(f);
+        }
+        new File(TEST_WAREHOUSE_DIR).mkdirs();
+
+        dropTable(BASIC_TABLE);
+        dropTable(PARTITIONED_TABLE);
+    }
 }
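
For the partitioned variants exercised by TestHCatStorerMulti above, the following sketch isolates the static-partition store pattern. It is illustrative only: it assumes an existing table junit_parted_basic(a int, b string) partitioned by (bkt string), and the relation and partition values are placeholders.

import java.io.IOException;

import org.apache.pig.ExecType;
import org.apache.pig.PigServer;

// Sketch only: split one relation across two static partitions of an
// existing partitioned HCatalog table, as in testStorePartitionedTable.
public class HCatPartitionedStoreSketch {
    public static void main(String[] args) throws IOException {
        PigServer server = new PigServer(ExecType.LOCAL);
        server.setBatchOn();
        server.registerQuery("A = load 'input.data' as (a:int, b:chararray);");
        server.registerQuery("B = filter A by a < 2;");
        server.registerQuery("store B into 'junit_parted_basic' using "
            + "org.apache.hcatalog.pig.HCatStorer('bkt=0');");
        server.registerQuery("C = filter A by a >= 2;");
        server.registerQuery("store C into 'junit_parted_basic' using "
            + "org.apache.hcatalog.pig.HCatStorer('bkt=1');");
        server.executeBatch();
    }
}
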
diff --git a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestPigHCatUtil.java b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestPigHCatUtil.java
index 0ce1088..73a6e73 100644
--- a/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestPigHCatUtil.java
+++ b/hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestPigHCatUtil.java
@@ -31,60 +31,60 @@
 
 public class TestPigHCatUtil {
 
-  @Test
-  public void testGetBagSubSchema() throws Exception {
+    @Test
+    public void testGetBagSubSchema() throws Exception {
 
-    // Define the expected schema.
-    ResourceFieldSchema[] bagSubFieldSchemas = new ResourceFieldSchema[1];
-    bagSubFieldSchemas[0] = new ResourceFieldSchema().setName("innertuple")
-        .setDescription("The tuple in the bag").setType(DataType.TUPLE);
+        // Define the expected schema.
+        ResourceFieldSchema[] bagSubFieldSchemas = new ResourceFieldSchema[1];
+        bagSubFieldSchemas[0] = new ResourceFieldSchema().setName("innertuple")
+            .setDescription("The tuple in the bag").setType(DataType.TUPLE);
 
-    ResourceFieldSchema[] innerTupleFieldSchemas = new ResourceFieldSchema[1];
-    innerTupleFieldSchemas[0] =
-        new ResourceFieldSchema().setName("innerfield").setType(DataType.CHARARRAY);
+        ResourceFieldSchema[] innerTupleFieldSchemas = new ResourceFieldSchema[1];
+        innerTupleFieldSchemas[0] =
+            new ResourceFieldSchema().setName("innerfield").setType(DataType.CHARARRAY);
 
-    bagSubFieldSchemas[0].setSchema(new ResourceSchema().setFields(innerTupleFieldSchemas));
-    ResourceSchema expected = new ResourceSchema().setFields(bagSubFieldSchemas);
+        bagSubFieldSchemas[0].setSchema(new ResourceSchema().setFields(innerTupleFieldSchemas));
+        ResourceSchema expected = new ResourceSchema().setFields(bagSubFieldSchemas);
 
-    // Get the actual converted schema.
-    HCatSchema hCatSchema = new HCatSchema(Lists.newArrayList(
-        new HCatFieldSchema("innerLlama", HCatFieldSchema.Type.STRING, null)));
-    HCatFieldSchema hCatFieldSchema =
-        new HCatFieldSchema("llama", HCatFieldSchema.Type.ARRAY, hCatSchema, null);
-    ResourceSchema actual = PigHCatUtil.getBagSubSchema(hCatFieldSchema);
+        // Get the actual converted schema.
+        HCatSchema hCatSchema = new HCatSchema(Lists.newArrayList(
+            new HCatFieldSchema("innerLlama", HCatFieldSchema.Type.STRING, null)));
+        HCatFieldSchema hCatFieldSchema =
+            new HCatFieldSchema("llama", HCatFieldSchema.Type.ARRAY, hCatSchema, null);
+        ResourceSchema actual = PigHCatUtil.getBagSubSchema(hCatFieldSchema);
 
-    Assert.assertEquals(expected.toString(), actual.toString());
-  }
+        Assert.assertEquals(expected.toString(), actual.toString());
+    }
 
-  @Test
-  public void testGetBagSubSchemaConfigured() throws Exception {
+    @Test
+    public void testGetBagSubSchemaConfigured() throws Exception {
 
-    // NOTE: pig-0.8 sets client system properties by actually getting the client
-    // system properties. Starting in pig-0.9 you must pass the properties in.
-    // When updating our pig dependency this will need updated.
-    System.setProperty(HCatConstants.HCAT_PIG_INNER_TUPLE_NAME, "t");
-    System.setProperty(HCatConstants.HCAT_PIG_INNER_FIELD_NAME, "FIELDNAME_tuple");
-    UDFContext.getUDFContext().setClientSystemProps();
+        // NOTE: pig-0.8 sets client system properties by actually getting the client
+        // system properties. Starting in pig-0.9 you must pass the properties in.
+        // When updating our pig dependency this will need updated.
+        System.setProperty(HCatConstants.HCAT_PIG_INNER_TUPLE_NAME, "t");
+        System.setProperty(HCatConstants.HCAT_PIG_INNER_FIELD_NAME, "FIELDNAME_tuple");
+        UDFContext.getUDFContext().setClientSystemProps();
 
-    // Define the expected schema.
-    ResourceFieldSchema[] bagSubFieldSchemas = new ResourceFieldSchema[1];
-    bagSubFieldSchemas[0] = new ResourceFieldSchema().setName("t")
-        .setDescription("The tuple in the bag").setType(DataType.TUPLE);
+        // Define the expected schema.
+        ResourceFieldSchema[] bagSubFieldSchemas = new ResourceFieldSchema[1];
+        bagSubFieldSchemas[0] = new ResourceFieldSchema().setName("t")
+            .setDescription("The tuple in the bag").setType(DataType.TUPLE);
 
-    ResourceFieldSchema[] innerTupleFieldSchemas = new ResourceFieldSchema[1];
-    innerTupleFieldSchemas[0] =
-        new ResourceFieldSchema().setName("llama_tuple").setType(DataType.CHARARRAY);
+        ResourceFieldSchema[] innerTupleFieldSchemas = new ResourceFieldSchema[1];
+        innerTupleFieldSchemas[0] =
+            new ResourceFieldSchema().setName("llama_tuple").setType(DataType.CHARARRAY);
 
-    bagSubFieldSchemas[0].setSchema(new ResourceSchema().setFields(innerTupleFieldSchemas));
-    ResourceSchema expected = new ResourceSchema().setFields(bagSubFieldSchemas);
+        bagSubFieldSchemas[0].setSchema(new ResourceSchema().setFields(innerTupleFieldSchemas));
+        ResourceSchema expected = new ResourceSchema().setFields(bagSubFieldSchemas);
 
-    // Get the actual converted schema.
-    HCatSchema actualHCatSchema = new HCatSchema(Lists.newArrayList(
-        new HCatFieldSchema("innerLlama", HCatFieldSchema.Type.STRING, null)));
-    HCatFieldSchema actualHCatFieldSchema =
-        new HCatFieldSchema("llama", HCatFieldSchema.Type.ARRAY, actualHCatSchema, null);
-    ResourceSchema actual = PigHCatUtil.getBagSubSchema(actualHCatFieldSchema);
+        // Get the actual converted schema.
+        HCatSchema actualHCatSchema = new HCatSchema(Lists.newArrayList(
+            new HCatFieldSchema("innerLlama", HCatFieldSchema.Type.STRING, null)));
+        HCatFieldSchema actualHCatFieldSchema =
+            new HCatFieldSchema("llama", HCatFieldSchema.Type.ARRAY, actualHCatSchema, null);
+        ResourceSchema actual = PigHCatUtil.getBagSubSchema(actualHCatFieldSchema);
 
-    Assert.assertEquals(expected.toString(), actual.toString());
-  }
+        Assert.assertEquals(expected.toString(), actual.toString());
+    }
 }
diff --git a/shims/src/20/java/org/apache/hcatalog/shims/HCatHadoopShims20S.java b/shims/src/20/java/org/apache/hcatalog/shims/HCatHadoopShims20S.java
index 000689b..5deea06 100644
--- a/shims/src/20/java/org/apache/hcatalog/shims/HCatHadoopShims20S.java
+++ b/shims/src/20/java/org/apache/hcatalog/shims/HCatHadoopShims20S.java
@@ -46,9 +46,8 @@
         return new TaskAttemptID();
     }
 
-	@Override
-	public TaskAttemptContext createTaskAttemptContext(Configuration conf,
-			TaskAttemptID taskId) {
+    @Override
+    public TaskAttemptContext createTaskAttemptContext(Configuration conf, TaskAttemptID taskId) {
         return new TaskAttemptContext(conf, taskId);
     }
 
@@ -133,12 +132,12 @@
     @Override
     public String getPropertyName(PropertyName name) {
         switch (name) {
-            case CACHE_ARCHIVES:
-                return DistributedCache.CACHE_ARCHIVES;
-            case CACHE_FILES:
-                return DistributedCache.CACHE_FILES;
-            case CACHE_SYMLINK:
-                return DistributedCache.CACHE_SYMLINK;
+        case CACHE_ARCHIVES:
+            return DistributedCache.CACHE_ARCHIVES;
+        case CACHE_FILES:
+            return DistributedCache.CACHE_FILES;
+        case CACHE_SYMLINK:
+            return DistributedCache.CACHE_SYMLINK;
         }
 
         return "";
diff --git a/shims/src/23/java/org/apache/hcatalog/shims/HCatHadoopShims23.java b/shims/src/23/java/org/apache/hcatalog/shims/HCatHadoopShims23.java
index 57276a8..d7181cf 100644
--- a/shims/src/23/java/org/apache/hcatalog/shims/HCatHadoopShims23.java
+++ b/shims/src/23/java/org/apache/hcatalog/shims/HCatHadoopShims23.java
@@ -49,20 +49,20 @@
 
     @Override
     public org.apache.hadoop.mapreduce.TaskAttemptContext createTaskAttemptContext(Configuration conf,
-            org.apache.hadoop.mapreduce.TaskAttemptID taskId) {
+                                                                                   org.apache.hadoop.mapreduce.TaskAttemptID taskId) {
         return new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(conf, taskId);
     }
 
     @Override
     public org.apache.hadoop.mapred.TaskAttemptContext createTaskAttemptContext(org.apache.hadoop.mapred.JobConf conf,
-            org.apache.hadoop.mapred.TaskAttemptID taskId, Progressable progressable) {
+                                                                                org.apache.hadoop.mapred.TaskAttemptID taskId, Progressable progressable) {
         org.apache.hadoop.mapred.TaskAttemptContext newContext = null;
         try {
             java.lang.reflect.Constructor construct = org.apache.hadoop.mapred.TaskAttemptContextImpl.class.getDeclaredConstructor(
-                    org.apache.hadoop.mapred.JobConf.class, org.apache.hadoop.mapred.TaskAttemptID.class,
-                    Reporter.class);
+                org.apache.hadoop.mapred.JobConf.class, org.apache.hadoop.mapred.TaskAttemptID.class,
+                Reporter.class);
             construct.setAccessible(true);
-            newContext = (org.apache.hadoop.mapred.TaskAttemptContext)construct.newInstance(conf, taskId, (Reporter)progressable);
+            newContext = (org.apache.hadoop.mapred.TaskAttemptContext) construct.newInstance(conf, taskId, (Reporter) progressable);
         } catch (Exception e) {
             throw new RuntimeException(e);
         }
@@ -71,7 +71,7 @@
 
     @Override
     public JobContext createJobContext(Configuration conf,
-            JobID jobId) {
+                                       JobID jobId) {
         JobContext ctxt = new JobContextImpl(conf, jobId);
 
         return ctxt;
@@ -79,15 +79,15 @@
 
     @Override
     public org.apache.hadoop.mapred.JobContext createJobContext(org.apache.hadoop.mapred.JobConf conf,
-            org.apache.hadoop.mapreduce.JobID jobId, Progressable progressable) {
-        org.apache.hadoop.mapred.JobContext newContext = 
-            new org.apache.hadoop.mapred.JobContextImpl(conf, jobId, (org.apache.hadoop.mapred.Reporter)progressable);
+                                                                org.apache.hadoop.mapreduce.JobID jobId, Progressable progressable) {
+        org.apache.hadoop.mapred.JobContext newContext =
+            new org.apache.hadoop.mapred.JobContextImpl(conf, jobId, (org.apache.hadoop.mapred.Reporter) progressable);
         return newContext;
     }
 
     @Override
     public void commitJob(OutputFormat outputFormat, ResourceSchema schema,
-            String arg1, Job job) throws IOException {
+                          String arg1, Job job) throws IOException {
         // Do nothing as this was fixed by MAPREDUCE-1447.
     }
 
@@ -106,12 +106,12 @@
     @Override
     public String getPropertyName(PropertyName name) {
         switch (name) {
-            case CACHE_ARCHIVES:
-                return MRJobConfig.CACHE_ARCHIVES;
-            case CACHE_FILES:
-                return MRJobConfig.CACHE_FILES;
-            case CACHE_SYMLINK:
-                return MRJobConfig.CACHE_SYMLINK;
+        case CACHE_ARCHIVES:
+            return MRJobConfig.CACHE_ARCHIVES;
+        case CACHE_FILES:
+            return MRJobConfig.CACHE_FILES;
+        case CACHE_SYMLINK:
+            return MRJobConfig.CACHE_SYMLINK;
         }
 
         return "";
diff --git a/src/java/org/apache/hcatalog/cli/HCatCli.java b/src/java/org/apache/hcatalog/cli/HCatCli.java
index 6bef627..f305fe2 100644
--- a/src/java/org/apache/hcatalog/cli/HCatCli.java
+++ b/src/java/org/apache/hcatalog/cli/HCatCli.java
@@ -53,278 +53,278 @@
 
 public class HCatCli {
 
-  @SuppressWarnings("static-access")
-  public static void main(String[] args) {
+    @SuppressWarnings("static-access")
+    public static void main(String[] args) {
 
-    try {
-      LogUtils.initHiveLog4j();
-    } catch (LogInitializationException e) {
+        try {
+            LogUtils.initHiveLog4j();
+        } catch (LogInitializationException e) {
 
-    }
-
-    CliSessionState ss = new CliSessionState(new HiveConf(SessionState.class));
-    ss.in = System.in;
-    try {
-      ss.out = new PrintStream(System.out, true, "UTF-8");
-      ss.err = new PrintStream(System.err, true, "UTF-8");
-    } catch (UnsupportedEncodingException e) {
-      System.exit(1);
-    }
-
-    HiveConf conf = ss.getConf();
-
-    HiveConf.setVar(conf, ConfVars.SEMANTIC_ANALYZER_HOOK, HCatSemanticAnalyzer.class.getName());
-
-    SessionState.start(ss);
-
-    Options options = new Options();
-
-    // -e 'quoted-query-string'
-    options.addOption(OptionBuilder
-        .hasArg()
-        .withArgName("exec")
-        .withDescription("hcat command given from command line")
-        .create('e'));
-
-    // -f <query-file>
-    options.addOption(OptionBuilder
-        .hasArg()
-        .withArgName("file")
-        .withDescription("hcat commands in file")
-        .create('f'));
-
-    // -g
-    options.addOption(OptionBuilder
-        .hasArg().
-        withArgName("group").
-        withDescription("group for the db/table specified in CREATE statement").
-        create('g'));
-
-    // -p
-    options.addOption(OptionBuilder
-        .hasArg()
-        .withArgName("perms")
-        .withDescription("permissions for the db/table specified in CREATE statement")
-        .create('p'));
-
-    // -D
-    options.addOption(OptionBuilder
-        .hasArgs(2)
-        .withArgName("property=value")
-        .withValueSeparator()
-        .withDescription("use hadoop value for given property")
-        .create('D'));
-
-    // [-h|--help]
-    options.addOption(new Option("h", "help", false, "Print help information"));
-
-    Parser parser = new GnuParser();
-    CommandLine cmdLine = null;
-
-    try {
-      cmdLine  = parser.parse(options,args);
-
-    } catch (ParseException e) {
-      printUsage(options, ss.err);
-      System.exit(1);
-    }
-    // -e
-    String execString = (String) cmdLine.getOptionValue('e');
-    // -f
-    String fileName = (String) cmdLine.getOptionValue('f');
-    // -h
-    if (cmdLine.hasOption('h')) {
-      printUsage(options,ss.out);
-      System.exit(0);
-    }
-
-    if (execString != null && fileName != null) {
-      ss.err.println("The '-e' and '-f' options cannot be specified simultaneously");
-      printUsage(options,ss.err);
-      System.exit(1);
-    }
-
-    // -p
-    String perms = (String)cmdLine.getOptionValue('p');
-    if(perms != null){
-      validatePermissions(ss, conf, perms);
-    }
-
-    // -g
-    String grp = (String) cmdLine.getOptionValue('g');
-    if(grp != null){
-      conf.set(HCatConstants.HCAT_GROUP, grp);
-    }
-
-    // -D
-    setConfProperties(conf, cmdLine.getOptionProperties("D"));
-
-    if (execString != null) {
-      System.exit(processLine(execString));
-    }
-
-    try {
-      if (fileName != null) {
-        System.exit(processFile(fileName));
-      }
-    } catch (FileNotFoundException e) {
-      ss.err.println("Input file not found. (" + e.getMessage() + ")");
-      System.exit(1);
-    } catch (IOException e) {
-      ss.err.println("Could not open input file for reading. (" + e.getMessage() + ")");
-      System.exit(1);
-    }
-
-    // -h
-    printUsage(options, ss.err);
-    System.exit(1);
-  }
-
-  private static void setConfProperties(HiveConf conf, Properties props) {
-    for(java.util.Map.Entry<Object, Object> e : props.entrySet())
-        conf.set((String) e.getKey(), (String) e.getValue());
-  }
-
-  private static int processLine(String line) {
-    int ret = 0;
-
-    String command = "";
-    for (String oneCmd : line.split(";")) {
-
-      if (StringUtils.endsWith(oneCmd, "\\")) {
-        command += StringUtils.chop(oneCmd) + ";";
-        continue;
-      } else {
-        command += oneCmd;
-      }
-      if (StringUtils.isBlank(command)) {
-        continue;
-      }
-
-      ret = processCmd(command);
-      command = "";
-    }
-    return ret;
-  }
-
-  private static int processFile(String fileName) throws IOException {
-    FileReader fileReader = null;
-    BufferedReader reader = null;
-    try {
-      fileReader = new FileReader(fileName);
-      reader = new BufferedReader(fileReader);
-      String line;
-      StringBuilder qsb = new StringBuilder();
-
-      while ((line = reader.readLine()) != null) {
-        qsb.append(line + "\n");
-      }
-
-      return (processLine(qsb.toString()));
-    } finally {
-      if (fileReader != null) {
-        fileReader.close();
-      }
-      if(reader != null) {
-        reader.close();
-      }
-    }
-  }
-
-  private static int processCmd(String cmd){
-
-    SessionState ss = SessionState.get();
-    long start = System.currentTimeMillis();
-
-    cmd = cmd.trim();
-    String firstToken = cmd.split("\\s+")[0].trim();
-
-    if(firstToken.equalsIgnoreCase("set")){
-      return new SetProcessor().run(cmd.substring(firstToken.length()).trim()).getResponseCode();
-    } else if (firstToken.equalsIgnoreCase("dfs")){
-      return new DfsProcessor(ss.getConf()).run(cmd.substring(firstToken.length()).trim()).getResponseCode();
-    }
-
-    HCatDriver driver = new HCatDriver();
-
-    int ret = driver.run(cmd).getResponseCode();
-
-    if (ret != 0) {
-      driver.close();
-      System.exit(ret);
-    }
-
-    ArrayList<String> res = new ArrayList<String>();
-    try {
-      while (driver.getResults(res)) {
-        for (String r : res) {
-          ss.out.println(r);
         }
-        res.clear();
-      }
-    } catch (IOException e) {
-      ss.err.println("Failed with exception " + e.getClass().getName() + ":"
-          + e.getMessage() + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
-      ret = 1;
-    } catch (CommandNeedRetryException e) {
-        ss.err.println("Failed with exception " + e.getClass().getName() + ":"
+
+        CliSessionState ss = new CliSessionState(new HiveConf(SessionState.class));
+        ss.in = System.in;
+        try {
+            ss.out = new PrintStream(System.out, true, "UTF-8");
+            ss.err = new PrintStream(System.err, true, "UTF-8");
+        } catch (UnsupportedEncodingException e) {
+            System.exit(1);
+        }
+
+        HiveConf conf = ss.getConf();
+
+        HiveConf.setVar(conf, ConfVars.SEMANTIC_ANALYZER_HOOK, HCatSemanticAnalyzer.class.getName());
+
+        SessionState.start(ss);
+
+        Options options = new Options();
+
+        // -e 'quoted-query-string'
+        options.addOption(OptionBuilder
+                .hasArg()
+                .withArgName("exec")
+                .withDescription("hcat command given from command line")
+                .create('e'));
+
+        // -f <query-file>
+        options.addOption(OptionBuilder
+                .hasArg()
+                .withArgName("file")
+                .withDescription("hcat commands in file")
+                .create('f'));
+
+        // -g
+        options.addOption(OptionBuilder
+                .hasArg().
+                withArgName("group").
+                withDescription("group for the db/table specified in CREATE statement").
+                create('g'));
+
+        // -p
+        options.addOption(OptionBuilder
+                .hasArg()
+                .withArgName("perms")
+                .withDescription("permissions for the db/table specified in CREATE statement")
+                .create('p'));
+
+        // -D
+        options.addOption(OptionBuilder
+                .hasArgs(2)
+                .withArgName("property=value")
+                .withValueSeparator()
+                .withDescription("use hadoop value for given property")
+                .create('D'));
+
+        // [-h|--help]
+        options.addOption(new Option("h", "help", false, "Print help information"));
+
+        Parser parser = new GnuParser();
+        CommandLine cmdLine = null;
+
+        try {
+            cmdLine = parser.parse(options, args);
+
+        } catch (ParseException e) {
+            printUsage(options, ss.err);
+            System.exit(1);
+        }
+        // -e
+        String execString = (String) cmdLine.getOptionValue('e');
+        // -f
+        String fileName = (String) cmdLine.getOptionValue('f');
+        // -h
+        if (cmdLine.hasOption('h')) {
+            printUsage(options, ss.out);
+            System.exit(0);
+        }
+
+        if (execString != null && fileName != null) {
+            ss.err.println("The '-e' and '-f' options cannot be specified simultaneously");
+            printUsage(options, ss.err);
+            System.exit(1);
+        }
+
+        // -p
+        String perms = (String) cmdLine.getOptionValue('p');
+        if (perms != null) {
+            validatePermissions(ss, conf, perms);
+        }
+
+        // -g
+        String grp = (String) cmdLine.getOptionValue('g');
+        if (grp != null) {
+            conf.set(HCatConstants.HCAT_GROUP, grp);
+        }
+
+        // -D
+        setConfProperties(conf, cmdLine.getOptionProperties("D"));
+
+        if (execString != null) {
+            System.exit(processLine(execString));
+        }
+
+        try {
+            if (fileName != null) {
+                System.exit(processFile(fileName));
+            }
+        } catch (FileNotFoundException e) {
+            ss.err.println("Input file not found. (" + e.getMessage() + ")");
+            System.exit(1);
+        } catch (IOException e) {
+            ss.err.println("Could not open input file for reading. (" + e.getMessage() + ")");
+            System.exit(1);
+        }
+
+        // -h
+        printUsage(options, ss.err);
+        System.exit(1);
+    }
+
+    private static void setConfProperties(HiveConf conf, Properties props) {
+        for (java.util.Map.Entry<Object, Object> e : props.entrySet())
+            conf.set((String) e.getKey(), (String) e.getValue());
+    }
+
+    private static int processLine(String line) {
+        int ret = 0;
+
+        String command = "";
+        for (String oneCmd : line.split(";")) {
+
+            if (StringUtils.endsWith(oneCmd, "\\")) {
+                command += StringUtils.chop(oneCmd) + ";";
+                continue;
+            } else {
+                command += oneCmd;
+            }
+            if (StringUtils.isBlank(command)) {
+                continue;
+            }
+
+            ret = processCmd(command);
+            command = "";
+        }
+        return ret;
+    }
+
+    private static int processFile(String fileName) throws IOException {
+        FileReader fileReader = null;
+        BufferedReader reader = null;
+        try {
+            fileReader = new FileReader(fileName);
+            reader = new BufferedReader(fileReader);
+            String line;
+            StringBuilder qsb = new StringBuilder();
+
+            while ((line = reader.readLine()) != null) {
+                qsb.append(line + "\n");
+            }
+
+            return (processLine(qsb.toString()));
+        } finally {
+            if (fileReader != null) {
+                fileReader.close();
+            }
+            if (reader != null) {
+                reader.close();
+            }
+        }
+    }
+
+    private static int processCmd(String cmd) {
+
+        SessionState ss = SessionState.get();
+        long start = System.currentTimeMillis();
+
+        cmd = cmd.trim();
+        String firstToken = cmd.split("\\s+")[0].trim();
+
+        if (firstToken.equalsIgnoreCase("set")) {
+            return new SetProcessor().run(cmd.substring(firstToken.length()).trim()).getResponseCode();
+        } else if (firstToken.equalsIgnoreCase("dfs")) {
+            return new DfsProcessor(ss.getConf()).run(cmd.substring(firstToken.length()).trim()).getResponseCode();
+        }
+
+        HCatDriver driver = new HCatDriver();
+
+        int ret = driver.run(cmd).getResponseCode();
+
+        if (ret != 0) {
+            driver.close();
+            System.exit(ret);
+        }
+
+        ArrayList<String> res = new ArrayList<String>();
+        try {
+            while (driver.getResults(res)) {
+                for (String r : res) {
+                    ss.out.println(r);
+                }
+                res.clear();
+            }
+        } catch (IOException e) {
+            ss.err.println("Failed with exception " + e.getClass().getName() + ":"
                 + e.getMessage() + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
             ret = 1;
+        } catch (CommandNeedRetryException e) {
+            ss.err.println("Failed with exception " + e.getClass().getName() + ":"
+                + e.getMessage() + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
+            ret = 1;
+        }
+
+        int cret = driver.close();
+        if (ret == 0) {
+            ret = cret;
+        }
+
+        long end = System.currentTimeMillis();
+        if (end > start) {
+            double timeTaken = (end - start) / 1000.0;
+            ss.err.println("Time taken: " + timeTaken + " seconds");
+        }
+        return ret;
     }
 
-    int cret = driver.close();
-    if (ret == 0) {
-      ret = cret;
+    private static void printUsage(Options options, OutputStream os) {
+        PrintWriter pw = new PrintWriter(os);
+        new HelpFormatter().printHelp(pw, 2 * HelpFormatter.DEFAULT_WIDTH,
+            "hcat { -e \"<query>\" | -f \"<filepath>\" } [ -g \"<group>\" ] [ -p \"<perms>\" ] [ -D\"<name>=<value>\" ]",
+            null, options, HelpFormatter.DEFAULT_LEFT_PAD, HelpFormatter.DEFAULT_DESC_PAD,
+            null, false);
+        pw.flush();
     }
 
-    long end = System.currentTimeMillis();
-    if (end > start) {
-      double timeTaken = (end - start) / 1000.0;
-      ss.err.println("Time taken: " + timeTaken + " seconds");
+    private static void validatePermissions(CliSessionState ss, HiveConf conf, String perms) {
+        perms = perms.trim();
+        FsPermission fp = null;
+
+        if (perms.matches("^\\s*([r,w,x,-]{9})\\s*$")) {
+            fp = FsPermission.valueOf("d" + perms);
+        } else if (perms.matches("^\\s*([0-7]{3})\\s*$")) {
+            fp = new FsPermission(Short.decode("0" + perms));
+        } else {
+            ss.err.println("Invalid permission specification: " + perms);
+            System.exit(1);
+        }
+
+        if (!HCatUtil.validateMorePermissive(fp.getUserAction(), fp.getGroupAction())) {
+            ss.err.println("Invalid permission specification: " + perms + " : user permissions must be more permissive than group permission ");
+            System.exit(1);
+        }
+        if (!HCatUtil.validateMorePermissive(fp.getGroupAction(), fp.getOtherAction())) {
+            ss.err.println("Invalid permission specification: " + perms + " : group permissions must be more permissive than other permission ");
+            System.exit(1);
+        }
+        if ((!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getUserAction())) ||
+            (!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getGroupAction())) ||
+            (!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getOtherAction()))) {
+            ss.err.println("Invalid permission specification: " + perms + " : permissions must have execute permissions if read or write permissions are specified ");
+            System.exit(1);
+        }
+
+        conf.set(HCatConstants.HCAT_PERMS, "d" + fp.toString());
+
     }
-    return ret;
-  }
-
-  private static void printUsage(Options options, OutputStream os) {
-    PrintWriter pw = new PrintWriter(os);
-    new HelpFormatter().printHelp(pw, 2 * HelpFormatter.DEFAULT_WIDTH,
-      "hcat { -e \"<query>\" | -f \"<filepath>\" } [ -g \"<group>\" ] [ -p \"<perms>\" ] [ -D\"<name>=<value>\" ]",
-        null,options, HelpFormatter.DEFAULT_LEFT_PAD,HelpFormatter.DEFAULT_DESC_PAD,
-        null, false);
-    pw.flush();
-  }
-
-  private static void validatePermissions(CliSessionState ss, HiveConf conf, String perms) {
-    perms = perms.trim();
-    FsPermission fp = null;
-
-    if (perms.matches("^\\s*([r,w,x,-]{9})\\s*$")){
-      fp = FsPermission.valueOf("d"+perms);
-    } else if (perms.matches("^\\s*([0-7]{3})\\s*$")){
-      fp = new FsPermission(Short.decode("0"+perms));
-    } else {
-      ss.err.println("Invalid permission specification: "+perms);
-      System.exit(1);
-    }
-
-    if (!HCatUtil.validateMorePermissive(fp.getUserAction(),fp.getGroupAction())){
-      ss.err.println("Invalid permission specification: "+perms+" : user permissions must be more permissive than group permission ");
-      System.exit(1);
-    }
-    if (!HCatUtil.validateMorePermissive(fp.getGroupAction(),fp.getOtherAction())){
-      ss.err.println("Invalid permission specification: "+perms+" : group permissions must be more permissive than other permission ");
-      System.exit(1);
-    }
-    if ( (!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getUserAction())) ||
-        (!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getGroupAction())) ||
-        (!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getOtherAction())) ) {
-      ss.err.println("Invalid permission specification: "+perms+" : permissions must have execute permissions if read or write permissions are specified ");
-      System.exit(1);
-    }
-
-    conf.set(HCatConstants.HCAT_PERMS, "d"+fp.toString());
-
-  }
 
 
 }
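
The validatePermissions logic re-indented above accepts either a 9-character symbolic spec or a 3-digit octal spec for the -p option. The sketch below shows the same two conversions in isolation, using the Hadoop FsPermission API the CLI already depends on; the sample values are illustrative.

import org.apache.hadoop.fs.permission.FsPermission;

// Sketch only: the two permission formats HCatCli accepts and how each is
// turned into an FsPermission, as in validatePermissions above.
public class PermissionParseSketch {
    public static void main(String[] args) {
        // 9-character symbolic form, e.g. "rwxr-xr-x"; a leading 'd' is added
        // because FsPermission.valueOf expects an ls-style 10-character string.
        FsPermission symbolic = FsPermission.valueOf("d" + "rwxr-xr-x");

        // 3-digit octal form, e.g. "755"; the leading '0' makes Short.decode
        // parse it as octal (0755 == rwxr-xr-x).
        FsPermission octal = new FsPermission(Short.decode("0" + "755"));

        System.out.println(symbolic); // rwxr-xr-x
        System.out.println(octal);    // rwxr-xr-x
    }
}
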
diff --git a/src/java/org/apache/hcatalog/cli/HCatDriver.java b/src/java/org/apache/hcatalog/cli/HCatDriver.java
index 3c6ba1d..79b7a83 100644
--- a/src/java/org/apache/hcatalog/cli/HCatDriver.java
+++ b/src/java/org/apache/hcatalog/cli/HCatDriver.java
@@ -34,109 +34,109 @@
 
 public class HCatDriver extends Driver {
 
-  @Override
-  public CommandProcessorResponse run(String command) {
+    @Override
+    public CommandProcessorResponse run(String command) {
 
-    CommandProcessorResponse cpr = null;
-    try {
-      cpr = super.run(command);
-    } catch (CommandNeedRetryException e) {
-      return new CommandProcessorResponse(-1, e.toString(), "");
-    }
-
-    SessionState ss = SessionState.get();
-
-    if (cpr.getResponseCode() == 0){
-      // Only attempt to do this, if cmd was successful.
-      int rc = setFSPermsNGrp(ss);
-      cpr = new CommandProcessorResponse(rc);
-    }
-    // reset conf vars
-    ss.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, "");
-    ss.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, "");
-
-    return cpr;
-  }
-
-  private int setFSPermsNGrp(SessionState ss) {
-
-    Configuration conf =ss.getConf();
-
-    String tblName = conf.get(HCatConstants.HCAT_CREATE_TBL_NAME,"");
-    if (tblName.isEmpty()) {
-      tblName = conf.get("import.destination.table", "");
-      conf.set("import.destination.table", "");
-    }
-    String dbName = conf.get(HCatConstants.HCAT_CREATE_DB_NAME, "");
-    String grp = conf.get(HCatConstants.HCAT_GROUP,null);
-    String permsStr = conf.get(HCatConstants.HCAT_PERMS,null);
-
-    if(tblName.isEmpty() && dbName.isEmpty()){
-      // it wasn't create db/table
-      return 0;
-    }
-
-    if(null == grp && null == permsStr) {
-      // there were no grp and perms to begin with.
-      return 0;
-    }
-
-    FsPermission perms = FsPermission.valueOf(permsStr);
-
-    if(!tblName.isEmpty()){
-      Hive db = null;
-      try{
-        db = Hive.get();
-        Table tbl =  db.getTable(tblName);
-        Path tblPath = tbl.getPath();
-
-        FileSystem fs = tblPath.getFileSystem(conf);
-        if(null != perms){
-          fs.setPermission(tblPath, perms);
+        CommandProcessorResponse cpr = null;
+        try {
+            cpr = super.run(command);
+        } catch (CommandNeedRetryException e) {
+            return new CommandProcessorResponse(-1, e.toString(), "");
         }
-        if(null != grp){
-          fs.setOwner(tblPath, null, grp);
-        }
-        return 0;
 
-      } catch (Exception e){
-          ss.err.println(String.format("Failed to set permissions/groups on TABLE: <%s> %s",tblName,e.getMessage()));
-          try {  // We need to drop the table.
-            if(null != db){ db.dropTable(tblName); }
-          } catch (HiveException he) {
-            ss.err.println(String.format("Failed to drop TABLE <%s> after failing to set permissions/groups on it. %s",tblName,e.getMessage()));
-          }
-          return 1;
-      }
-    }
-    else{
-      // looks like a db operation
-      if (dbName.isEmpty() || dbName.equals(MetaStoreUtils.DEFAULT_DATABASE_NAME)){
-        // We dont set perms or groups for default dir.
-        return 0;
-      }
-      else{
-        try{
-          Hive db = Hive.get();
-          Path dbPath = new Warehouse(conf).getDatabasePath(db.getDatabase(dbName));
-          FileSystem fs = dbPath.getFileSystem(conf);
-          if(perms != null){
-            fs.setPermission(dbPath, perms);
-          }
-          if(null != grp){
-            fs.setOwner(dbPath, null, grp);
-          }
-          return 0;
-        } catch (Exception e){
-          ss.err.println(String.format("Failed to set permissions and/or group on DB: <%s> %s", dbName, e.getMessage()));
-          try {
-            Hive.get().dropDatabase(dbName);
-          } catch (Exception e1) {
-            ss.err.println(String.format("Failed to drop DB <%s> after failing to set permissions/group on it. %s", dbName, e1.getMessage()));
-          }
-          return 1;
+        SessionState ss = SessionState.get();
+
+        if (cpr.getResponseCode() == 0) {
+            // Only attempt to do this, if cmd was successful.
+            int rc = setFSPermsNGrp(ss);
+            cpr = new CommandProcessorResponse(rc);
         }
-      }
+        // reset conf vars
+        ss.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, "");
+        ss.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, "");
+
+        return cpr;
     }
-  }
+
+    private int setFSPermsNGrp(SessionState ss) {
+
+        Configuration conf = ss.getConf();
+
+        String tblName = conf.get(HCatConstants.HCAT_CREATE_TBL_NAME, "");
+        if (tblName.isEmpty()) {
+            tblName = conf.get("import.destination.table", "");
+            conf.set("import.destination.table", "");
+        }
+        String dbName = conf.get(HCatConstants.HCAT_CREATE_DB_NAME, "");
+        String grp = conf.get(HCatConstants.HCAT_GROUP, null);
+        String permsStr = conf.get(HCatConstants.HCAT_PERMS, null);
+
+        if (tblName.isEmpty() && dbName.isEmpty()) {
+            // it wasn't create db/table
+            return 0;
+        }
+
+        if (null == grp && null == permsStr) {
+            // there were no grp and perms to begin with.
+            return 0;
+        }
+
+        FsPermission perms = FsPermission.valueOf(permsStr);
+
+        if (!tblName.isEmpty()) {
+            Hive db = null;
+            try {
+                db = Hive.get();
+                Table tbl = db.getTable(tblName);
+                Path tblPath = tbl.getPath();
+
+                FileSystem fs = tblPath.getFileSystem(conf);
+                if (null != perms) {
+                    fs.setPermission(tblPath, perms);
+                }
+                if (null != grp) {
+                    fs.setOwner(tblPath, null, grp);
+                }
+                return 0;
+
+            } catch (Exception e) {
+                ss.err.println(String.format("Failed to set permissions/groups on TABLE: <%s> %s", tblName, e.getMessage()));
+                try {  // We need to drop the table.
+                    if (null != db) {
+                        db.dropTable(tblName);
+                    }
+                } catch (HiveException he) {
+                    ss.err.println(String.format("Failed to drop TABLE <%s> after failing to set permissions/groups on it. %s", tblName, e.getMessage()));
+                }
+                return 1;
+            }
+        } else {
+            // looks like a db operation
+            if (dbName.isEmpty() || dbName.equals(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
+                // We don't set perms or groups for the default dir.
+                return 0;
+            } else {
+                try {
+                    Hive db = Hive.get();
+                    Path dbPath = new Warehouse(conf).getDatabasePath(db.getDatabase(dbName));
+                    FileSystem fs = dbPath.getFileSystem(conf);
+                    if (perms != null) {
+                        fs.setPermission(dbPath, perms);
+                    }
+                    if (null != grp) {
+                        fs.setOwner(dbPath, null, grp);
+                    }
+                    return 0;
+                } catch (Exception e) {
+                    ss.err.println(String.format("Failed to set permissions and/or group on DB: <%s> %s", dbName, e.getMessage()));
+                    try {
+                        Hive.get().dropDatabase(dbName);
+                    } catch (Exception e1) {
+                        ss.err.println(String.format("Failed to drop DB <%s> after failing to set permissions/group on it. %s", dbName, e1.getMessage()));
+                    }
+                    return 1;
+                }
+            }
+        }
+    }
 }
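
For reference, a minimal self-contained sketch (not part of this patch) of the HDFS calls the setFSPermsNGrp() path above relies on: a symbolic permission string is parsed with FsPermission.valueOf and applied to a warehouse path, and the group is set with a null user so the owner is left untouched. The class name, helper name, and the example path/permission/group values are illustrative assumptions; in HCatalog the values come from HCAT_PERMS and HCAT_GROUP.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class PermsAndGroupSketch {

    // Parse a unix-style symbolic permission (e.g. "drwxrwxr-x") and apply it, together
    // with a group, to the given path. Mirrors the table/db branches of setFSPermsNGrp():
    // setPermission() first, then setOwner() with a null user so only the group changes.
    static void applyPermsAndGroup(Configuration conf, Path path,
                                   String permsStr, String grp) throws Exception {
        FileSystem fs = path.getFileSystem(conf);
        if (permsStr != null) {
            fs.setPermission(path, FsPermission.valueOf(permsStr));
        }
        if (grp != null) {
            fs.setOwner(path, null, grp);
        }
    }

    public static void main(String[] args) throws Exception {
        // Example values only.
        applyPermsAndGroup(new Configuration(),
            new Path("/user/hive/warehouse/demo_db.db"), "drwxrwxr-x", "hadoop");
    }
}
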
diff --git a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
index 71e8f39..9fa6e85 100644
--- a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
+++ b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
@@ -34,62 +34,62 @@
 import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 import org.apache.hcatalog.common.HCatConstants;
 
-final class CreateDatabaseHook  extends HCatSemanticAnalyzerBase {
+final class CreateDatabaseHook extends HCatSemanticAnalyzerBase {
 
-  String databaseName;
+    String databaseName;
 
-  @Override
-  public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
-  throws SemanticException {
+    @Override
+    public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
+        throws SemanticException {
 
-    Hive db;
-    try {
-      db = context.getHive();
-    } catch (HiveException e) {
-      throw new SemanticException("Couldn't get Hive DB instance in semantic analysis phase.", e);
-    }
-
-    // Analyze and create tbl properties object
-    int numCh = ast.getChildCount();
-
-    databaseName = BaseSemanticAnalyzer.getUnescapedName((ASTNode)ast.getChild(0));
-
-    for (int num = 1; num < numCh; num++) {
-      ASTNode child = (ASTNode) ast.getChild(num);
-
-      switch (child.getToken().getType()) {
-
-      case HiveParser.TOK_IFNOTEXISTS:
+        Hive db;
         try {
-          List<String> dbs = db.getDatabasesByPattern(databaseName);
-          if (dbs != null && dbs.size() > 0) { // db exists
-            return ast;
-          }
+            db = context.getHive();
         } catch (HiveException e) {
-          throw new SemanticException(e);
+            throw new SemanticException("Couldn't get Hive DB instance in semantic analysis phase.", e);
         }
-        break;
-      }
+
+        // Analyze and create tbl properties object
+        int numCh = ast.getChildCount();
+
+        databaseName = BaseSemanticAnalyzer.getUnescapedName((ASTNode) ast.getChild(0));
+
+        for (int num = 1; num < numCh; num++) {
+            ASTNode child = (ASTNode) ast.getChild(num);
+
+            switch (child.getToken().getType()) {
+
+            case HiveParser.TOK_IFNOTEXISTS:
+                try {
+                    List<String> dbs = db.getDatabasesByPattern(databaseName);
+                    if (dbs != null && dbs.size() > 0) { // db exists
+                        return ast;
+                    }
+                } catch (HiveException e) {
+                    throw new SemanticException(e);
+                }
+                break;
+            }
+        }
+
+        return ast;
     }
 
-    return ast;
-  }
-
-  @Override
-  public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List<Task<? extends Serializable>> rootTasks) throws SemanticException {
-    context.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, databaseName);
-    super.postAnalyze(context, rootTasks);
-  }
-  
-  @Override
-  protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context,
-      Hive hive, DDLWork work) throws HiveException {
-    CreateDatabaseDesc createDb = work.getCreateDatabaseDesc();
-    if (createDb != null) {
-      Database db = new Database(createDb.getName(), createDb.getComment(), 
-          createDb.getLocationUri(), createDb.getDatabaseProperties());
-      authorize(db, Privilege.CREATE);
+    @Override
+    public void postAnalyze(HiveSemanticAnalyzerHookContext context,
+                            List<Task<? extends Serializable>> rootTasks) throws SemanticException {
+        context.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, databaseName);
+        super.postAnalyze(context, rootTasks);
     }
-  }
+
+    @Override
+    protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context,
+                                    Hive hive, DDLWork work) throws HiveException {
+        CreateDatabaseDesc createDb = work.getCreateDatabaseDesc();
+        if (createDb != null) {
+            Database db = new Database(createDb.getName(), createDb.getComment(),
+                createDb.getLocationUri(), createDb.getDatabaseProperties());
+            authorize(db, Privilege.CREATE);
+        }
+    }
 }
diff --git a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
index 169e776..002f057 100644
--- a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
+++ b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
@@ -48,18 +48,18 @@
 final class CreateTableHook extends HCatSemanticAnalyzerBase {
 
     private String tableName;
-    
+
     @Override
     public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,
-            ASTNode ast) throws SemanticException {
+                              ASTNode ast) throws SemanticException {
 
         Hive db;
         try {
             db = context.getHive();
         } catch (HiveException e) {
             throw new SemanticException(
-                    "Couldn't get Hive DB instance in semantic analysis phase.",
-                    e);
+                "Couldn't get Hive DB instance in semantic analysis phase.",
+                e);
         }
 
         // Analyze and create tbl properties object
@@ -67,7 +67,7 @@
 
         String inputFormat = null, outputFormat = null;
         tableName = BaseSemanticAnalyzer.getUnescapedName((ASTNode) ast
-                .getChild(0));
+            .getChild(0));
         boolean likeTable = false;
 
         for (int num = 1; num < numCh; num++) {
@@ -75,89 +75,89 @@
 
             switch (child.getToken().getType()) {
 
-                case HiveParser.TOK_QUERY: // CTAS
-                    throw new SemanticException(
-                            "Operation not supported. Create table as " +
-                            "Select is not a valid operation.");
+            case HiveParser.TOK_QUERY: // CTAS
+                throw new SemanticException(
+                    "Operation not supported. Create table as " +
+                        "Select is not a valid operation.");
 
-                case HiveParser.TOK_TABLEBUCKETS:
-                    break;
+            case HiveParser.TOK_TABLEBUCKETS:
+                break;
 
-                case HiveParser.TOK_TBLSEQUENCEFILE:
-                    inputFormat = HCatConstants.SEQUENCEFILE_INPUT;
-                    outputFormat = HCatConstants.SEQUENCEFILE_OUTPUT;
-                    break;
+            case HiveParser.TOK_TBLSEQUENCEFILE:
+                inputFormat = HCatConstants.SEQUENCEFILE_INPUT;
+                outputFormat = HCatConstants.SEQUENCEFILE_OUTPUT;
+                break;
 
-                case HiveParser.TOK_TBLTEXTFILE:
-                    inputFormat      = org.apache.hadoop.mapred.TextInputFormat.class.getName();
-                    outputFormat     = org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat.class.getName();
+            case HiveParser.TOK_TBLTEXTFILE:
+                inputFormat = org.apache.hadoop.mapred.TextInputFormat.class.getName();
+                outputFormat = org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat.class.getName();
 
-                    break;
+                break;
 
-                case HiveParser.TOK_LIKETABLE:
-                    likeTable = true;
-                    break;
+            case HiveParser.TOK_LIKETABLE:
+                likeTable = true;
+                break;
 
-                case HiveParser.TOK_IFNOTEXISTS:
-                    try {
-                        List<String> tables = db.getTablesByPattern(tableName);
-                        if (tables != null && tables.size() > 0) { // table
-                                                                   // exists
-                            return ast;
-                        }
-                    } catch (HiveException e) {
-                        throw new SemanticException(e);
-                    }
-                    break;
-
-                case HiveParser.TOK_TABLEPARTCOLS:
-                    List<FieldSchema> partCols = BaseSemanticAnalyzer
-                            .getColumns((ASTNode) child.getChild(0), false);
-                    for (FieldSchema fs : partCols) {
-                        if (!fs.getType().equalsIgnoreCase("string")) {
-                            throw new SemanticException(
-                                    "Operation not supported. HCatalog only " +
-                                    "supports partition columns of type string. "
-                                            + "For column: "
-                                            + fs.getName()
-                                            + " Found type: " + fs.getType());
-                        }
-                    }
-                    break;
-
-                case HiveParser.TOK_STORAGEHANDLER:
-                    String storageHandler = BaseSemanticAnalyzer
-                            .unescapeSQLString(child.getChild(0).getText());
-                    if (org.apache.commons.lang.StringUtils
-                            .isNotEmpty(storageHandler)) {
+            case HiveParser.TOK_IFNOTEXISTS:
+                try {
+                    List<String> tables = db.getTablesByPattern(tableName);
+                    if (tables != null && tables.size() > 0) { // table
+                        // exists
                         return ast;
                     }
+                } catch (HiveException e) {
+                    throw new SemanticException(e);
+                }
+                break;
 
-                    break;
-
-                case HiveParser.TOK_TABLEFILEFORMAT:
-                    if (child.getChildCount() < 2) {
+            case HiveParser.TOK_TABLEPARTCOLS:
+                List<FieldSchema> partCols = BaseSemanticAnalyzer
+                    .getColumns((ASTNode) child.getChild(0), false);
+                for (FieldSchema fs : partCols) {
+                    if (!fs.getType().equalsIgnoreCase("string")) {
                         throw new SemanticException(
-                                "Incomplete specification of File Format. " +
-                                "You must provide InputFormat, OutputFormat.");
+                            "Operation not supported. HCatalog only " +
+                                "supports partition columns of type string. "
+                                + "For column: "
+                                + fs.getName()
+                                + " Found type: " + fs.getType());
                     }
-                    inputFormat = BaseSemanticAnalyzer.unescapeSQLString(child
-                            .getChild(0).getText());
-                    outputFormat = BaseSemanticAnalyzer.unescapeSQLString(child
-                            .getChild(1).getText());
-                    break;
+                }
+                break;
 
-                case HiveParser.TOK_TBLRCFILE:
-                    inputFormat = RCFileInputFormat.class.getName();
-                    outputFormat = RCFileOutputFormat.class.getName();
-                    break;
+            case HiveParser.TOK_STORAGEHANDLER:
+                String storageHandler = BaseSemanticAnalyzer
+                    .unescapeSQLString(child.getChild(0).getText());
+                if (org.apache.commons.lang.StringUtils
+                    .isNotEmpty(storageHandler)) {
+                    return ast;
+                }
+
+                break;
+
+            case HiveParser.TOK_TABLEFILEFORMAT:
+                if (child.getChildCount() < 2) {
+                    throw new SemanticException(
+                        "Incomplete specification of File Format. " +
+                            "You must provide InputFormat, OutputFormat.");
+                }
+                inputFormat = BaseSemanticAnalyzer.unescapeSQLString(child
+                    .getChild(0).getText());
+                outputFormat = BaseSemanticAnalyzer.unescapeSQLString(child
+                    .getChild(1).getText());
+                break;
+
+            case HiveParser.TOK_TBLRCFILE:
+                inputFormat = RCFileInputFormat.class.getName();
+                outputFormat = RCFileOutputFormat.class.getName();
+                break;
 
             }
         }
-        
+
         if (!likeTable && (inputFormat == null || outputFormat == null)) {
             throw new SemanticException(
-                    "STORED AS specification is either incomplete or incorrect.");
+                "STORED AS specification is either incomplete or incorrect.");
         }
 
 
@@ -166,8 +166,8 @@
 
     @Override
     public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-            List<Task<? extends Serializable>> rootTasks)
-            throws SemanticException {
+                            List<Task<? extends Serializable>> rootTasks)
+        throws SemanticException {
 
         if (rootTasks.size() == 0) {
             // There will be no DDL task created in case if its CREATE TABLE IF
@@ -175,12 +175,12 @@
             return;
         }
         CreateTableDesc desc = ((DDLTask) rootTasks.get(rootTasks.size() - 1))
-                .getWork().getCreateTblDesc();
+            .getWork().getCreateTblDesc();
         if (desc == null) {
-          // Desc will be null if its CREATE TABLE LIKE. Desc will be
-          // contained in CreateTableLikeDesc. Currently, HCat disallows CTLT in
-          // pre-hook. So, desc can never be null.
-          return;
+            // Desc will be null if it's CREATE TABLE LIKE. Desc will be
+            // contained in CreateTableLikeDesc. Currently, HCat disallows CTLT in
+            // the pre-hook. So, desc can never be null.
+            return;
         }
         Map<String, String> tblProps = desc.getTblProps();
         if (tblProps == null) {
@@ -196,11 +196,11 @@
         } else {
             try {
                 HCatStorageHandler storageHandlerInst = HCatUtil
-                        .getStorageHandler(context.getConf(),
-                                                     desc.getStorageHandler(),
-                                                     desc.getSerName(),
-                                                     desc.getInputFormat(),
-                                                     desc.getOutputFormat());
+                    .getStorageHandler(context.getConf(),
+                        desc.getStorageHandler(),
+                        desc.getSerName(),
+                        desc.getInputFormat(),
+                        desc.getOutputFormat());
                 //Authorization checks are performed by the storageHandler.getAuthorizationProvider(), if  
                 //StorageDelegationAuthorizationProvider is used.
             } catch (IOException e) {
@@ -209,33 +209,33 @@
         }
 
         if (desc != null) {
-          try {
-            Table table = context.getHive().newTable(desc.getTableName());
-            if (desc.getLocation() != null) {
-              table.setDataLocation(new Path(desc.getLocation()).toUri());
+            try {
+                Table table = context.getHive().newTable(desc.getTableName());
+                if (desc.getLocation() != null) {
+                    table.setDataLocation(new Path(desc.getLocation()).toUri());
+                }
+                if (desc.getStorageHandler() != null) {
+                    table.setProperty(
+                        org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE,
+                        desc.getStorageHandler());
+                }
+                for (Map.Entry<String, String> prop : tblProps.entrySet()) {
+                    table.setProperty(prop.getKey(), prop.getValue());
+                }
+                for (Map.Entry<String, String> prop : desc.getSerdeProps().entrySet()) {
+                    table.setSerdeParam(prop.getKey(), prop.getValue());
+                }
+                //TODO: set other Table properties as needed
+
+                // authorize against the table operation so that location permissions, if any, can be checked
+
+                if (HiveConf.getBoolVar(context.getConf(),
+                    HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
+                    authorize(table, Privilege.CREATE);
+                }
+            } catch (HiveException ex) {
+                throw new SemanticException(ex);
             }
-            if (desc.getStorageHandler() != null) {
-              table.setProperty(
-                org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE,
-                desc.getStorageHandler());
-            }
-            for (Map.Entry<String, String> prop : tblProps.entrySet()) {
-              table.setProperty(prop.getKey(), prop.getValue());
-            }
-            for (Map.Entry<String, String> prop : desc.getSerdeProps().entrySet()) {
-              table.setSerdeParam(prop.getKey(), prop.getValue());
-            }
-            //TODO: set other Table properties as needed
-  
-            //authorize against the table operation so that location permissions can be checked if any
-            
-            if (HiveConf.getBoolVar(context.getConf(),
-                HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
-              authorize(table, Privilege.CREATE);
-            }
-          } catch (HiveException ex) {
-            throw new SemanticException(ex);
-          }
         }
 
         desc.setTblProps(tblProps);
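
As an aside, a small standalone illustration of the partition-column rule enforced in the TOK_TABLEPARTCOLS branch above: only string-typed partition columns are accepted. The class name, method name, and sample columns are hypothetical.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class PartColCheckSketch {

    // Rejects any partition column whose type is not string, using the same message
    // the TOK_TABLEPARTCOLS branch produces.
    static void assertStringPartCols(List<FieldSchema> partCols) throws SemanticException {
        for (FieldSchema fs : partCols) {
            if (!fs.getType().equalsIgnoreCase("string")) {
                throw new SemanticException(
                    "Operation not supported. HCatalog only supports partition columns of "
                        + "type string. For column: " + fs.getName()
                        + " Found type: " + fs.getType());
            }
        }
    }

    public static void main(String[] args) throws Exception {
        assertStringPartCols(Arrays.asList(new FieldSchema("dt", "string", null)));  // passes
        assertStringPartCols(Arrays.asList(new FieldSchema("hour", "int", null)));   // throws
    }
}
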
diff --git a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
index 2bd7490..b5151d0 100644
--- a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
+++ b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
@@ -50,326 +50,325 @@
 
 public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase {
 
-  private AbstractSemanticAnalyzerHook hook;
-  private ASTNode ast;
+    private AbstractSemanticAnalyzerHook hook;
+    private ASTNode ast;
 
 
+    @Override
+    public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
+        throws SemanticException {
 
-  @Override
-  public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
-      throws SemanticException {
+        this.ast = ast;
+        switch (ast.getToken().getType()) {
 
-      this.ast = ast;
-      switch (ast.getToken().getType()) {
+        // HCat wants to intercept the following tokens and special-handle them.
+        case HiveParser.TOK_CREATETABLE:
+            hook = new CreateTableHook();
+            return hook.preAnalyze(context, ast);
 
-      // HCat wants to intercept following tokens and special-handle them.
-      case HiveParser.TOK_CREATETABLE:
-        hook = new CreateTableHook();
-        return hook.preAnalyze(context, ast);
+        case HiveParser.TOK_CREATEDATABASE:
+            hook = new CreateDatabaseHook();
+            return hook.preAnalyze(context, ast);
 
-      case HiveParser.TOK_CREATEDATABASE:
-        hook = new CreateDatabaseHook();
-        return hook.preAnalyze(context, ast);
+        case HiveParser.TOK_ALTERTABLE_PARTITION:
+            if (((ASTNode) ast.getChild(1)).getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) {
+                return ast;
+            } else if (((ASTNode) ast.getChild(1)).getToken().getType() == HiveParser.TOK_ALTERTABLE_ALTERPARTS_MERGEFILES) {
+                // unsupported
+                throw new SemanticException("Operation not supported.");
+            } else {
+                return ast;
+            }
 
-      case HiveParser.TOK_ALTERTABLE_PARTITION:
-          if (((ASTNode)ast.getChild(1)).getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) {
+            // HCat will allow these operations to be performed.
+            // Database DDL
+        case HiveParser.TOK_SHOWDATABASES:
+        case HiveParser.TOK_DROPDATABASE:
+        case HiveParser.TOK_SWITCHDATABASE:
+        case HiveParser.TOK_DESCDATABASE:
+        case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
+
+            // Index DDL
+        case HiveParser.TOK_ALTERINDEX_PROPERTIES:
+        case HiveParser.TOK_CREATEINDEX:
+        case HiveParser.TOK_DROPINDEX:
+        case HiveParser.TOK_SHOWINDEXES:
+
+            // View DDL
+            // "alter view add partition" does not work because of the nature of the implementation
+            // of the DDL in Hive. Hive will internally invoke another Driver on the select statement,
+            // and HCat does not let the "select" statement through. There is no way around it
+            // without modifying Hive code, so just leave it unsupported.
+            //case HiveParser.TOK_ALTERVIEW_ADDPARTS:
+        case HiveParser.TOK_ALTERVIEW_DROPPARTS:
+        case HiveParser.TOK_ALTERVIEW_PROPERTIES:
+        case HiveParser.TOK_ALTERVIEW_RENAME:
+        case HiveParser.TOK_CREATEVIEW:
+        case HiveParser.TOK_DROPVIEW:
+
+            // Authorization DDL
+        case HiveParser.TOK_CREATEROLE:
+        case HiveParser.TOK_DROPROLE:
+        case HiveParser.TOK_GRANT_ROLE:
+        case HiveParser.TOK_GRANT_WITH_OPTION:
+        case HiveParser.TOK_GRANT:
+        case HiveParser.TOK_REVOKE_ROLE:
+        case HiveParser.TOK_REVOKE:
+        case HiveParser.TOK_SHOW_GRANT:
+        case HiveParser.TOK_SHOW_ROLE_GRANT:
+
+            // Misc DDL
+        case HiveParser.TOK_LOCKTABLE:
+        case HiveParser.TOK_UNLOCKTABLE:
+        case HiveParser.TOK_SHOWLOCKS:
+        case HiveParser.TOK_DESCFUNCTION:
+        case HiveParser.TOK_SHOWFUNCTIONS:
+        case HiveParser.TOK_EXPLAIN:
+
+            // Table DDL
+        case HiveParser.TOK_ALTERTABLE_ADDPARTS:
+        case HiveParser.TOK_ALTERTABLE_ADDCOLS:
+        case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
+        case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES:
+        case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT:
+        case HiveParser.TOK_ALTERTABLE_DROPPARTS:
+        case HiveParser.TOK_ALTERTABLE_PROPERTIES:
+        case HiveParser.TOK_ALTERTABLE_RENAME:
+        case HiveParser.TOK_ALTERTABLE_RENAMECOL:
+        case HiveParser.TOK_ALTERTABLE_REPLACECOLS:
+        case HiveParser.TOK_ALTERTABLE_SERIALIZER:
+        case HiveParser.TOK_ALTERTABLE_TOUCH:
+        case HiveParser.TOK_DESCTABLE:
+        case HiveParser.TOK_DROPTABLE:
+        case HiveParser.TOK_SHOW_TABLESTATUS:
+        case HiveParser.TOK_SHOWPARTITIONS:
+        case HiveParser.TOK_SHOWTABLES:
             return ast;
-          } else if (((ASTNode)ast.getChild(1)).getToken().getType() == HiveParser.TOK_ALTERTABLE_ALTERPARTS_MERGEFILES){
-              // unsupported
-              throw new SemanticException("Operation not supported.");
-          } else {
-              return ast;
-          }
 
-      // HCat will allow these operations to be performed.
-      // Database DDL
-      case HiveParser.TOK_SHOWDATABASES:
-      case HiveParser.TOK_DROPDATABASE:
-      case HiveParser.TOK_SWITCHDATABASE:
-      case HiveParser.TOK_DESCDATABASE:
-      case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
+        // In all other cases, throw an exception. It's a whitelist of allowed operations.
+        default:
+            throw new SemanticException("Operation not supported.");
 
-      // Index DDL
-      case HiveParser.TOK_ALTERINDEX_PROPERTIES:
-      case HiveParser.TOK_CREATEINDEX:
-      case HiveParser.TOK_DROPINDEX:
-      case HiveParser.TOK_SHOWINDEXES:
-
-      // View DDL
-      // "alter view add partition" does not work because of the nature of implementation
-      // of the DDL in hive. Hive will internally invoke another Driver on the select statement,
-      // and HCat does not let "select" statement through. I cannot find a way to get around it
-      // without modifying hive code. So just leave it unsupported.
-      //case HiveParser.TOK_ALTERVIEW_ADDPARTS:
-      case HiveParser.TOK_ALTERVIEW_DROPPARTS:
-      case HiveParser.TOK_ALTERVIEW_PROPERTIES:
-      case HiveParser.TOK_ALTERVIEW_RENAME:
-      case HiveParser.TOK_CREATEVIEW:
-      case HiveParser.TOK_DROPVIEW:
-
-      // Authorization DDL
-      case HiveParser.TOK_CREATEROLE:
-      case HiveParser.TOK_DROPROLE:
-      case HiveParser.TOK_GRANT_ROLE:
-      case HiveParser.TOK_GRANT_WITH_OPTION:
-      case HiveParser.TOK_GRANT:
-      case HiveParser.TOK_REVOKE_ROLE:
-      case HiveParser.TOK_REVOKE:
-      case HiveParser.TOK_SHOW_GRANT:
-      case HiveParser.TOK_SHOW_ROLE_GRANT:
-
-      // Misc DDL
-      case HiveParser.TOK_LOCKTABLE:
-      case HiveParser.TOK_UNLOCKTABLE:
-      case HiveParser.TOK_SHOWLOCKS:
-      case HiveParser.TOK_DESCFUNCTION:
-      case HiveParser.TOK_SHOWFUNCTIONS:
-      case HiveParser.TOK_EXPLAIN:
-
-      // Table DDL
-      case HiveParser.TOK_ALTERTABLE_ADDPARTS:
-      case HiveParser.TOK_ALTERTABLE_ADDCOLS:
-      case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
-      case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES:
-      case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT:
-      case HiveParser.TOK_ALTERTABLE_DROPPARTS:
-      case HiveParser.TOK_ALTERTABLE_PROPERTIES:
-      case HiveParser.TOK_ALTERTABLE_RENAME:
-      case HiveParser.TOK_ALTERTABLE_RENAMECOL:
-      case HiveParser.TOK_ALTERTABLE_REPLACECOLS:
-      case HiveParser.TOK_ALTERTABLE_SERIALIZER:
-      case HiveParser.TOK_ALTERTABLE_TOUCH:
-      case HiveParser.TOK_DESCTABLE:
-      case HiveParser.TOK_DROPTABLE:
-      case HiveParser.TOK_SHOW_TABLESTATUS:
-      case HiveParser.TOK_SHOWPARTITIONS:
-      case HiveParser.TOK_SHOWTABLES:
-        return ast;
-
-      // In all other cases, throw an exception. Its a white-list of allowed operations.
-      default:
-        throw new SemanticException("Operation not supported.");
-
-      }
-  }
-
-  @Override
-  public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List<Task<? extends Serializable>> rootTasks) throws SemanticException {
-
-    try{
-
-      switch (ast.getToken().getType()) {
-
-      case HiveParser.TOK_CREATETABLE:
-      case HiveParser.TOK_CREATEDATABASE:
-      case HiveParser.TOK_ALTERTABLE_PARTITION:
-
-      // HCat will allow these operations to be performed.
-      // Database DDL
-      case HiveParser.TOK_SHOWDATABASES:
-      case HiveParser.TOK_DROPDATABASE:
-      case HiveParser.TOK_SWITCHDATABASE:
-      case HiveParser.TOK_DESCDATABASE:
-      case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
-
-      // Index DDL
-      case HiveParser.TOK_ALTERINDEX_PROPERTIES:
-      case HiveParser.TOK_CREATEINDEX:
-      case HiveParser.TOK_DROPINDEX:
-      case HiveParser.TOK_SHOWINDEXES:
-
-      // View DDL
-      //case HiveParser.TOK_ALTERVIEW_ADDPARTS:
-      case HiveParser.TOK_ALTERVIEW_DROPPARTS:
-      case HiveParser.TOK_ALTERVIEW_PROPERTIES:
-      case HiveParser.TOK_ALTERVIEW_RENAME:
-      case HiveParser.TOK_CREATEVIEW:
-      case HiveParser.TOK_DROPVIEW:
-
-      // Authorization DDL
-      case HiveParser.TOK_CREATEROLE:
-      case HiveParser.TOK_DROPROLE:
-      case HiveParser.TOK_GRANT_ROLE:
-      case HiveParser.TOK_GRANT_WITH_OPTION:
-      case HiveParser.TOK_GRANT:
-      case HiveParser.TOK_REVOKE_ROLE:
-      case HiveParser.TOK_REVOKE:
-      case HiveParser.TOK_SHOW_GRANT:
-      case HiveParser.TOK_SHOW_ROLE_GRANT:
-
-      // Misc DDL
-      case HiveParser.TOK_LOCKTABLE:
-      case HiveParser.TOK_UNLOCKTABLE:
-      case HiveParser.TOK_SHOWLOCKS:
-      case HiveParser.TOK_DESCFUNCTION:
-      case HiveParser.TOK_SHOWFUNCTIONS:
-      case HiveParser.TOK_EXPLAIN:
-
-      // Table DDL
-      case HiveParser.TOK_ALTERTABLE_ADDPARTS:
-      case HiveParser.TOK_ALTERTABLE_ADDCOLS:
-      case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
-      case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES:
-      case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT:
-      case HiveParser.TOK_ALTERTABLE_DROPPARTS:
-      case HiveParser.TOK_ALTERTABLE_PROPERTIES:
-      case HiveParser.TOK_ALTERTABLE_RENAME:
-      case HiveParser.TOK_ALTERTABLE_RENAMECOL:
-      case HiveParser.TOK_ALTERTABLE_REPLACECOLS:
-      case HiveParser.TOK_ALTERTABLE_SERIALIZER:
-      case HiveParser.TOK_ALTERTABLE_TOUCH:
-      case HiveParser.TOK_DESCTABLE:
-      case HiveParser.TOK_DROPTABLE:
-      case HiveParser.TOK_SHOW_TABLESTATUS:
-      case HiveParser.TOK_SHOWPARTITIONS:
-      case HiveParser.TOK_SHOWTABLES:
-        break;
-
-      default:
-        throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, "Unexpected token: "+ast.getToken());
-      }
-
-      authorizeDDL(context, rootTasks);
-
-    } catch(HCatException e){
-      throw new SemanticException(e);
-    } catch (HiveException e) {
-      throw new SemanticException(e);
-    }
-
-    if(hook != null){
-      hook.postAnalyze(context, rootTasks);
-    }
-  }
-
-  private String extractTableName (String compoundName) {
-    /* 
-     * the table name can potentially be a dot-format one with column names
-     * specified as part of the table name. e.g. a.b.c where b is a column in
-     * a and c is a field of the object/column b etc. For authorization 
-     * purposes, we should use only the first part of the dotted name format.
-     *
-     */
-
-   String [] words = compoundName.split("\\.");
-   return words[0];
-  }
-
-  @Override
-  protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work)
-      throws HiveException {
-    // DB opereations, none of them are enforced by Hive right now.
-
-    ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
-    if (showDatabases != null) {
-      authorize(HiveOperation.SHOWDATABASES.getInputRequiredPrivileges(),
-          HiveOperation.SHOWDATABASES.getOutputRequiredPrivileges());
-    }
-
-    DropDatabaseDesc dropDb = work.getDropDatabaseDesc();
-    if (dropDb != null) {
-      Database db = cntxt.getHive().getDatabase(dropDb.getDatabaseName());
-      authorize(db, Privilege.DROP);
-    }
-
-    DescDatabaseDesc descDb = work.getDescDatabaseDesc();
-    if (descDb != null) {
-      Database db = cntxt.getHive().getDatabase(descDb.getDatabaseName());
-      authorize(db, Privilege.SELECT);
-    }
-
-    SwitchDatabaseDesc switchDb = work.getSwitchDatabaseDesc();
-    if (switchDb != null) {
-      Database db = cntxt.getHive().getDatabase(switchDb.getDatabaseName());
-      authorize(db, Privilege.SELECT);
-    }
-
-    ShowTablesDesc showTables = work.getShowTblsDesc();
-    if (showTables != null) {
-      String dbName = showTables.getDbName() == null ? cntxt.getHive().getCurrentDatabase()
-          : showTables.getDbName();
-      authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
-    }
-
-    ShowTableStatusDesc showTableStatus = work.getShowTblStatusDesc();
-    if (showTableStatus != null) {
-      String dbName = showTableStatus.getDbName() == null ? cntxt.getHive().getCurrentDatabase()
-          : showTableStatus.getDbName();
-      authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
-    }
-
-    // TODO: add alter database support in HCat
-
-    // Table operations.
-
-    DropTableDesc dropTable = work.getDropTblDesc();
-    if (dropTable != null) {
-      if (dropTable.getPartSpecs() == null) {
-        // drop table is already enforced by Hive. We only check for table level location even if the
-        // table is partitioned.
-      } else {
-        //this is actually a ALTER TABLE DROP PARITITION statement
-        for (PartitionSpec partSpec : dropTable.getPartSpecs()) {
-          // partitions are not added as write entries in drop partitions in Hive
-          Table table = hive.getTable(hive.getCurrentDatabase(), dropTable.getTableName());
-          List<Partition> partitions = null;
-          try {
-            partitions = hive.getPartitionsByFilter(table, partSpec.toString());
-           } catch (Exception e) {
-            throw new HiveException(e);
-           }
-
-          for (Partition part : partitions) {
-            authorize(part, Privilege.DROP);
-          }
         }
-      }
     }
 
-    AlterTableDesc alterTable = work.getAlterTblDesc();
-    if (alterTable != null) {
-      Table table = hive.getTable(hive.getCurrentDatabase(), alterTable.getOldName(), false);
+    @Override
+    public void postAnalyze(HiveSemanticAnalyzerHookContext context,
+                            List<Task<? extends Serializable>> rootTasks) throws SemanticException {
 
-      Partition part = null;
-      if (alterTable.getPartSpec() != null) {
-        part = hive.getPartition(table, alterTable.getPartSpec(), false);
-      }
+        try {
 
-      String newLocation = alterTable.getNewLocation();
+            switch (ast.getToken().getType()) {
 
-      /* Hcat requires ALTER_DATA privileges for ALTER TABLE LOCATION statements
-       * for the old table/partition location and the new location.
-       */
-      if (alterTable.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
-        if (part != null) {
-          authorize(part, Privilege.ALTER_DATA); // authorize for the old
-                                                 // location, and new location
-          part.setLocation(newLocation);
-          authorize(part, Privilege.ALTER_DATA);
-        } else {
-          authorize(table, Privilege.ALTER_DATA); // authorize for the old
-                                                  // location, and new location
-          table.getTTable().getSd().setLocation(newLocation);
-          authorize(table, Privilege.ALTER_DATA);
+            case HiveParser.TOK_CREATETABLE:
+            case HiveParser.TOK_CREATEDATABASE:
+            case HiveParser.TOK_ALTERTABLE_PARTITION:
+
+                // HCat will allow these operations to be performed.
+                // Database DDL
+            case HiveParser.TOK_SHOWDATABASES:
+            case HiveParser.TOK_DROPDATABASE:
+            case HiveParser.TOK_SWITCHDATABASE:
+            case HiveParser.TOK_DESCDATABASE:
+            case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
+
+                // Index DDL
+            case HiveParser.TOK_ALTERINDEX_PROPERTIES:
+            case HiveParser.TOK_CREATEINDEX:
+            case HiveParser.TOK_DROPINDEX:
+            case HiveParser.TOK_SHOWINDEXES:
+
+                // View DDL
+                //case HiveParser.TOK_ALTERVIEW_ADDPARTS:
+            case HiveParser.TOK_ALTERVIEW_DROPPARTS:
+            case HiveParser.TOK_ALTERVIEW_PROPERTIES:
+            case HiveParser.TOK_ALTERVIEW_RENAME:
+            case HiveParser.TOK_CREATEVIEW:
+            case HiveParser.TOK_DROPVIEW:
+
+                // Authorization DDL
+            case HiveParser.TOK_CREATEROLE:
+            case HiveParser.TOK_DROPROLE:
+            case HiveParser.TOK_GRANT_ROLE:
+            case HiveParser.TOK_GRANT_WITH_OPTION:
+            case HiveParser.TOK_GRANT:
+            case HiveParser.TOK_REVOKE_ROLE:
+            case HiveParser.TOK_REVOKE:
+            case HiveParser.TOK_SHOW_GRANT:
+            case HiveParser.TOK_SHOW_ROLE_GRANT:
+
+                // Misc DDL
+            case HiveParser.TOK_LOCKTABLE:
+            case HiveParser.TOK_UNLOCKTABLE:
+            case HiveParser.TOK_SHOWLOCKS:
+            case HiveParser.TOK_DESCFUNCTION:
+            case HiveParser.TOK_SHOWFUNCTIONS:
+            case HiveParser.TOK_EXPLAIN:
+
+                // Table DDL
+            case HiveParser.TOK_ALTERTABLE_ADDPARTS:
+            case HiveParser.TOK_ALTERTABLE_ADDCOLS:
+            case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
+            case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES:
+            case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT:
+            case HiveParser.TOK_ALTERTABLE_DROPPARTS:
+            case HiveParser.TOK_ALTERTABLE_PROPERTIES:
+            case HiveParser.TOK_ALTERTABLE_RENAME:
+            case HiveParser.TOK_ALTERTABLE_RENAMECOL:
+            case HiveParser.TOK_ALTERTABLE_REPLACECOLS:
+            case HiveParser.TOK_ALTERTABLE_SERIALIZER:
+            case HiveParser.TOK_ALTERTABLE_TOUCH:
+            case HiveParser.TOK_DESCTABLE:
+            case HiveParser.TOK_DROPTABLE:
+            case HiveParser.TOK_SHOW_TABLESTATUS:
+            case HiveParser.TOK_SHOWPARTITIONS:
+            case HiveParser.TOK_SHOWTABLES:
+                break;
+
+            default:
+                throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, "Unexpected token: " + ast.getToken());
+            }
+
+            authorizeDDL(context, rootTasks);
+
+        } catch (HCatException e) {
+            throw new SemanticException(e);
+        } catch (HiveException e) {
+            throw new SemanticException(e);
         }
-      }
-      //other alter operations are already supported by Hive
+
+        if (hook != null) {
+            hook.postAnalyze(context, rootTasks);
+        }
     }
 
-    // we should be careful when authorizing table based on just the 
-    // table name. If columns have separate authorization domain, it 
-    // must be honored
-    DescTableDesc descTable = work.getDescTblDesc();
-    if (descTable != null) {
-      String tableName = extractTableName(descTable.getTableName());
-      authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT);
+    private String extractTableName(String compoundName) {
+        /*
+        * The table name can potentially be in dotted format, with column names
+        * specified as part of the table name, e.g. a.b.c where b is a column in
+        * a and c is a field of the object/column b, etc. For authorization
+        * purposes, we should use only the first part of the dotted name.
+        *
+        */
+
+        String[] words = compoundName.split("\\.");
+        return words[0];
     }
 
-    ShowPartitionsDesc showParts = work.getShowPartsDesc();
-    if (showParts != null) {
-      String tableName = extractTableName(showParts.getTabName());
-      authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT);
+    @Override
+    protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work)
+        throws HiveException {
+        // DB operations; none of them are enforced by Hive right now.
+
+        ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
+        if (showDatabases != null) {
+            authorize(HiveOperation.SHOWDATABASES.getInputRequiredPrivileges(),
+                HiveOperation.SHOWDATABASES.getOutputRequiredPrivileges());
+        }
+
+        DropDatabaseDesc dropDb = work.getDropDatabaseDesc();
+        if (dropDb != null) {
+            Database db = cntxt.getHive().getDatabase(dropDb.getDatabaseName());
+            authorize(db, Privilege.DROP);
+        }
+
+        DescDatabaseDesc descDb = work.getDescDatabaseDesc();
+        if (descDb != null) {
+            Database db = cntxt.getHive().getDatabase(descDb.getDatabaseName());
+            authorize(db, Privilege.SELECT);
+        }
+
+        SwitchDatabaseDesc switchDb = work.getSwitchDatabaseDesc();
+        if (switchDb != null) {
+            Database db = cntxt.getHive().getDatabase(switchDb.getDatabaseName());
+            authorize(db, Privilege.SELECT);
+        }
+
+        ShowTablesDesc showTables = work.getShowTblsDesc();
+        if (showTables != null) {
+            String dbName = showTables.getDbName() == null ? cntxt.getHive().getCurrentDatabase()
+                : showTables.getDbName();
+            authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
+        }
+
+        ShowTableStatusDesc showTableStatus = work.getShowTblStatusDesc();
+        if (showTableStatus != null) {
+            String dbName = showTableStatus.getDbName() == null ? cntxt.getHive().getCurrentDatabase()
+                : showTableStatus.getDbName();
+            authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
+        }
+
+        // TODO: add alter database support in HCat
+
+        // Table operations.
+
+        DropTableDesc dropTable = work.getDropTblDesc();
+        if (dropTable != null) {
+            if (dropTable.getPartSpecs() == null) {
+                // drop table is already enforced by Hive. We only check for table level location even if the
+                // table is partitioned.
+            } else {
+                // this is actually an ALTER TABLE DROP PARTITION statement
+                for (PartitionSpec partSpec : dropTable.getPartSpecs()) {
+                    // partitions are not added as write entries in drop partitions in Hive
+                    Table table = hive.getTable(hive.getCurrentDatabase(), dropTable.getTableName());
+                    List<Partition> partitions = null;
+                    try {
+                        partitions = hive.getPartitionsByFilter(table, partSpec.toString());
+                    } catch (Exception e) {
+                        throw new HiveException(e);
+                    }
+
+                    for (Partition part : partitions) {
+                        authorize(part, Privilege.DROP);
+                    }
+                }
+            }
+        }
+
+        AlterTableDesc alterTable = work.getAlterTblDesc();
+        if (alterTable != null) {
+            Table table = hive.getTable(hive.getCurrentDatabase(), alterTable.getOldName(), false);
+
+            Partition part = null;
+            if (alterTable.getPartSpec() != null) {
+                part = hive.getPartition(table, alterTable.getPartSpec(), false);
+            }
+
+            String newLocation = alterTable.getNewLocation();
+
+            /* HCat requires ALTER_DATA privileges for ALTER TABLE LOCATION statements
+            * for the old table/partition location and the new location.
+            */
+            if (alterTable.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
+                if (part != null) {
+                    authorize(part, Privilege.ALTER_DATA); // authorize for the old
+                    // location, and new location
+                    part.setLocation(newLocation);
+                    authorize(part, Privilege.ALTER_DATA);
+                } else {
+                    authorize(table, Privilege.ALTER_DATA); // authorize for the old
+                    // location, and new location
+                    table.getTTable().getSd().setLocation(newLocation);
+                    authorize(table, Privilege.ALTER_DATA);
+                }
+            }
+            //other alter operations are already supported by Hive
+        }
+
+        // We should be careful when authorizing a table based on just the
+        // table name. If columns have a separate authorization domain, it
+        // must be honored.
+        DescTableDesc descTable = work.getDescTblDesc();
+        if (descTable != null) {
+            String tableName = extractTableName(descTable.getTableName());
+            authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT);
+        }
+
+        ShowPartitionsDesc showParts = work.getShowPartsDesc();
+        if (showParts != null) {
+            String tableName = extractTableName(showParts.getTabName());
+            authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT);
+        }
     }
-  }
 }
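
A tiny standalone sketch of the dotted-name handling described in extractTableName() above: only the first component of a compound name such as a.b.c is used for authorization. The class name and sample names are illustrative.

public class DottedNameSketch {

    // "a.b.c" names a column/field path; only the leading table component matters
    // for authorization, so split on "." and keep the first part.
    static String extractTableName(String compoundName) {
        return compoundName.split("\\.")[0];
    }

    public static void main(String[] args) {
        System.out.println(extractTableName("weblogs"));          // weblogs
        System.out.println(extractTableName("weblogs.ip.octet")); // weblogs
    }
}
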
diff --git a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
index 4b394e1..df99a8e 100644
--- a/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
+++ b/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
@@ -38,142 +38,141 @@
 import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
-/** 
+/**
  * Base class for HCatSemanticAnalyzer hooks.
  */
 public class HCatSemanticAnalyzerBase extends AbstractSemanticAnalyzerHook {
 
-  private HiveAuthorizationProvider authProvider;
-  
-  protected String getDbName(Hive hive, String dbName) {
-    return dbName == null ? hive.getCurrentDatabase() : dbName;
-  }
-  
-  public HiveAuthorizationProvider getAuthProvider() {
-    if (authProvider == null) {
-      authProvider = SessionState.get().getAuthorizer();
-    }
-    
-    return authProvider;
-  }
+    private HiveAuthorizationProvider authProvider;
 
-  @Override
-  public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List<Task<? extends Serializable>> rootTasks) throws SemanticException {
-    super.postAnalyze(context, rootTasks);
-    
-    //Authorize the operation.
-    authorizeDDL(context, rootTasks);
-  }
-  
-  /** 
-   * Checks for the given rootTasks, and calls authorizeDDLWork() for each DDLWork to 
-   * be authorized. The hooks should override this, or authorizeDDLWork to perform the 
-   * actual authorization.
-   */
-  /*
-   * Impl note: Hive provides authorization with it's own model, and calls the defined 
-   * HiveAuthorizationProvider from Driver.doAuthorization(). However, HCat has to 
-   * do additional calls to the auth provider to implement expected behavior for 
-   * StorageDelegationAuthorizationProvider. This means, that the defined auth provider 
-   * is called by both Hive and HCat. The following are missing from Hive's implementation,
-   * and when they are fixed in Hive, we can remove the HCat-specific auth checks.   
-   * 1. CREATE DATABASE/TABLE, ADD PARTITION statements does not call 
-   * HiveAuthorizationProvider.authorize() with the candidate objects, which means that
-   * we cannot do checks against defined LOCATION.
-   * 2. HiveOperation does not define sufficient Privileges for most of the operations, 
-   * especially database operations. 
-   * 3. For some of the operations, Hive SemanticAnalyzer does not add the changed 
-   * object as a WriteEntity or ReadEntity.
-   * 
-   * @see https://issues.apache.org/jira/browse/HCATALOG-244
-   * @see https://issues.apache.org/jira/browse/HCATALOG-245
-   */
-  protected void authorizeDDL(HiveSemanticAnalyzerHookContext context, 
-      List<Task<? extends Serializable>> rootTasks)  throws SemanticException {
-    
-    if (!HiveConf.getBoolVar(context.getConf(),
-        HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
-      return;
+    protected String getDbName(Hive hive, String dbName) {
+        return dbName == null ? hive.getCurrentDatabase() : dbName;
     }
-    
-    Hive hive;
-    try {
-      hive = context.getHive();
-    
-      for (Task<? extends Serializable> task : rootTasks) {
-        if (task.getWork() instanceof DDLWork) {
-          DDLWork work = (DDLWork)task.getWork();
-          if (work != null) {
-            authorizeDDLWork(context, hive, work);
-          }
+
+    public HiveAuthorizationProvider getAuthProvider() {
+        if (authProvider == null) {
+            authProvider = SessionState.get().getAuthorizer();
         }
-      }
-    } catch (SemanticException ex) {
-      throw ex;
-    } catch (AuthorizationException ex) {
-      throw ex;
-    } catch (Exception ex) {
-      throw new SemanticException(ex);
-    }
-  }
-  
-  /** 
-   * Authorized the given DDLWork. Does nothing by default. Override this 
-   * and delegate to the relevant method in HiveAuthorizationProvider obtained by 
-   * getAuthProvider().
-   */
-  protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context,
-      Hive hive, DDLWork work) throws HiveException {
-  }
 
-  protected void authorize(Privilege[] inputPrivs, Privilege[] outputPrivs)
-      throws AuthorizationException, SemanticException {
-    try {
-      getAuthProvider().authorize(inputPrivs, outputPrivs);
-    } catch (HiveException ex) {
-      throw new SemanticException(ex);
+        return authProvider;
     }
-  }
-  
-  protected void authorize(Database db, Privilege priv) 
-      throws AuthorizationException, SemanticException {
-    try {
-      getAuthProvider().authorize(db, null, new Privilege[] {priv});
-    } catch (HiveException ex) {
-      throw new SemanticException(ex);
+
+    @Override
+    public void postAnalyze(HiveSemanticAnalyzerHookContext context,
+                            List<Task<? extends Serializable>> rootTasks) throws SemanticException {
+        super.postAnalyze(context, rootTasks);
+
+        //Authorize the operation.
+        authorizeDDL(context, rootTasks);
     }
-  }
-  
-  protected void authorizeTable(Hive hive, String tableName, Privilege priv) 
-      throws AuthorizationException, HiveException {
-    Table table;
-    try{
-      table = hive.getTable(tableName);
+
+    /**
+     * Iterates over the given rootTasks and calls authorizeDDLWork() for each DDLWork
+     * to be authorized. Hooks should override this method, or authorizeDDLWork(), to
+     * perform the actual authorization.
+     */
+    /*
+    * Impl note: Hive provides authorization with its own model, and calls the defined
+    * HiveAuthorizationProvider from Driver.doAuthorization(). However, HCat has to
+    * do additional calls to the auth provider to implement expected behavior for
+    * StorageDelegationAuthorizationProvider. This means that the defined auth provider
+    * is called by both Hive and HCat. The following are missing from Hive's implementation,
+    * and when they are fixed in Hive, we can remove the HCat-specific auth checks.
+    * 1. CREATE DATABASE/TABLE and ADD PARTITION statements do not call
+    * HiveAuthorizationProvider.authorize() with the candidate objects, which means that
+    * we cannot do checks against defined LOCATION.
+    * 2. HiveOperation does not define sufficient Privileges for most of the operations,
+    * especially database operations.
+    * 3. For some of the operations, Hive SemanticAnalyzer does not add the changed
+    * object as a WriteEntity or ReadEntity.
+    *
+    * @see https://issues.apache.org/jira/browse/HCATALOG-244
+    * @see https://issues.apache.org/jira/browse/HCATALOG-245
+    */
+    protected void authorizeDDL(HiveSemanticAnalyzerHookContext context,
+                                List<Task<? extends Serializable>> rootTasks) throws SemanticException {
+
+        if (!HiveConf.getBoolVar(context.getConf(),
+            HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
+            return;
+        }
+
+        Hive hive;
+        try {
+            hive = context.getHive();
+
+            for (Task<? extends Serializable> task : rootTasks) {
+                if (task.getWork() instanceof DDLWork) {
+                    DDLWork work = (DDLWork) task.getWork();
+                    if (work != null) {
+                        authorizeDDLWork(context, hive, work);
+                    }
+                }
+            }
+        } catch (SemanticException ex) {
+            throw ex;
+        } catch (AuthorizationException ex) {
+            throw ex;
+        } catch (Exception ex) {
+            throw new SemanticException(ex);
+        }
     }
-    catch(InvalidTableException ite){
-      // Table itself doesn't exist in metastore, nothing to validate.
-      return;
+
+    /**
+     * Authorizes the given DDLWork. Does nothing by default. Override this
+     * and delegate to the relevant method in HiveAuthorizationProvider obtained by
+     * getAuthProvider().
+     */
+    protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context,
+                                    Hive hive, DDLWork work) throws HiveException {
     }
-    
-    authorize(table, priv);
-  }
-  
-  protected void authorize(Table table, Privilege priv) 
-      throws AuthorizationException, SemanticException {
-    try {
-      getAuthProvider().authorize(table, new Privilege[] {priv}, null);
-    } catch (HiveException ex) {
-      throw new SemanticException(ex);
+
+    protected void authorize(Privilege[] inputPrivs, Privilege[] outputPrivs)
+        throws AuthorizationException, SemanticException {
+        try {
+            getAuthProvider().authorize(inputPrivs, outputPrivs);
+        } catch (HiveException ex) {
+            throw new SemanticException(ex);
+        }
     }
-  }
-  
-  protected void authorize(Partition part, Privilege priv) 
-      throws AuthorizationException, SemanticException {
-    try {
-      getAuthProvider().authorize(part, new Privilege[] {priv}, null);
-    } catch (HiveException ex) {
-      throw new SemanticException(ex);
+
+    protected void authorize(Database db, Privilege priv)
+        throws AuthorizationException, SemanticException {
+        try {
+            getAuthProvider().authorize(db, null, new Privilege[]{priv});
+        } catch (HiveException ex) {
+            throw new SemanticException(ex);
+        }
     }
-  }
+
+    protected void authorizeTable(Hive hive, String tableName, Privilege priv)
+        throws AuthorizationException, HiveException {
+        Table table;
+        try {
+            table = hive.getTable(tableName);
+        } catch (InvalidTableException ite) {
+            // Table itself doesn't exist in metastore, nothing to validate.
+            return;
+        }
+
+        authorize(table, priv);
+    }
+
+    protected void authorize(Table table, Privilege priv)
+        throws AuthorizationException, SemanticException {
+        try {
+            getAuthProvider().authorize(table, new Privilege[]{priv}, null);
+        } catch (HiveException ex) {
+            throw new SemanticException(ex);
+        }
+    }
+
+    protected void authorize(Partition part, Privilege priv)
+        throws AuthorizationException, SemanticException {
+        try {
+            getAuthProvider().authorize(part, new Privilege[]{priv}, null);
+        } catch (HiveException ex) {
+            throw new SemanticException(ex);
+        }
+    }
 }
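
A hedged sketch of the extension point documented in HCatSemanticAnalyzerBase above: a hook subclasses the base class, overrides authorizeDDLWork(), and delegates to the configured HiveAuthorizationProvider through the authorize() helpers. ExampleDropTableHook is a hypothetical name, and the sketch assumes it lives in the same org.apache.hcatalog.cli.SemanticAnalysis package; the import paths follow the ones used by these classes.

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
import org.apache.hadoop.hive.ql.plan.DDLWork;
import org.apache.hadoop.hive.ql.plan.DropTableDesc;
import org.apache.hadoop.hive.ql.security.authorization.Privilege;

class ExampleDropTableHook extends HCatSemanticAnalyzerBase {

    // Authorize only DROP TABLE; every other DDLWork falls through to the base class,
    // which does nothing by default.
    @Override
    protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context,
                                    Hive hive, DDLWork work) throws HiveException {
        DropTableDesc dropTable = work.getDropTblDesc();
        if (dropTable != null) {
            Table table = hive.getTable(hive.getCurrentDatabase(), dropTable.getTableName());
            // authorize(Table, Privilege) routes through getAuthProvider().authorize(...)
            authorize(table, Privilege.DROP);
        }
    }
}
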
diff --git a/src/java/org/apache/hcatalog/common/HCatConstants.java b/src/java/org/apache/hcatalog/common/HCatConstants.java
index ad5a355..626d91b 100644
--- a/src/java/org/apache/hcatalog/common/HCatConstants.java
+++ b/src/java/org/apache/hcatalog/common/HCatConstants.java
@@ -23,140 +23,140 @@
 
 public final class HCatConstants {
 
-  public static final String HIVE_RCFILE_IF_CLASS = "org.apache.hadoop.hive.ql.io.RCFileInputFormat";
-  public static final String HIVE_RCFILE_OF_CLASS = "org.apache.hadoop.hive.ql.io.RCFileOutputFormat";
+    public static final String HIVE_RCFILE_IF_CLASS = "org.apache.hadoop.hive.ql.io.RCFileInputFormat";
+    public static final String HIVE_RCFILE_OF_CLASS = "org.apache.hadoop.hive.ql.io.RCFileOutputFormat";
 
-  public static final String SEQUENCEFILE_INPUT = SequenceFileInputFormat.class.getName();
-  public static final String SEQUENCEFILE_OUTPUT = SequenceFileOutputFormat.class.getName();
+    public static final String SEQUENCEFILE_INPUT = SequenceFileInputFormat.class.getName();
+    public static final String SEQUENCEFILE_OUTPUT = SequenceFileOutputFormat.class.getName();
 
-  public static final String HCAT_PIG_STORAGE_CLASS = "org.apache.pig.builtin.PigStorage";
-  public static final String HCAT_PIG_LOADER = "hcat.pig.loader";
-  public static final String HCAT_PIG_LOADER_LOCATION_SET = HCAT_PIG_LOADER + ".location.set" ;
-  public static final String HCAT_PIG_LOADER_ARGS = "hcat.pig.loader.args";
-  public static final String HCAT_PIG_STORER = "hcat.pig.storer";
-  public static final String HCAT_PIG_STORER_ARGS = "hcat.pig.storer.args";
-  public static final String HCAT_PIG_ARGS_DELIMIT = "hcat.pig.args.delimiter";
-  public static final String HCAT_PIG_ARGS_DELIMIT_DEFAULT = ",";
-  public static final String HCAT_PIG_STORER_LOCATION_SET = HCAT_PIG_STORER + ".location.set" ;
-  public static final String HCAT_PIG_INNER_TUPLE_NAME = "hcat.pig.inner.tuple.name";
-  public static final String HCAT_PIG_INNER_TUPLE_NAME_DEFAULT = "innertuple";
-  public static final String HCAT_PIG_INNER_FIELD_NAME = "hcat.pig.inner.field.name";
-  public static final String HCAT_PIG_INNER_FIELD_NAME_DEFAULT = "innerfield";
+    public static final String HCAT_PIG_STORAGE_CLASS = "org.apache.pig.builtin.PigStorage";
+    public static final String HCAT_PIG_LOADER = "hcat.pig.loader";
+    public static final String HCAT_PIG_LOADER_LOCATION_SET = HCAT_PIG_LOADER + ".location.set";
+    public static final String HCAT_PIG_LOADER_ARGS = "hcat.pig.loader.args";
+    public static final String HCAT_PIG_STORER = "hcat.pig.storer";
+    public static final String HCAT_PIG_STORER_ARGS = "hcat.pig.storer.args";
+    public static final String HCAT_PIG_ARGS_DELIMIT = "hcat.pig.args.delimiter";
+    public static final String HCAT_PIG_ARGS_DELIMIT_DEFAULT = ",";
+    public static final String HCAT_PIG_STORER_LOCATION_SET = HCAT_PIG_STORER + ".location.set";
+    public static final String HCAT_PIG_INNER_TUPLE_NAME = "hcat.pig.inner.tuple.name";
+    public static final String HCAT_PIG_INNER_TUPLE_NAME_DEFAULT = "innertuple";
+    public static final String HCAT_PIG_INNER_FIELD_NAME = "hcat.pig.inner.field.name";
+    public static final String HCAT_PIG_INNER_FIELD_NAME_DEFAULT = "innerfield";
 
-  //The keys used to store info into the job Configuration
-  public static final String HCAT_KEY_BASE = "mapreduce.lib.hcat";
+    //The keys used to store info into the job Configuration
+    public static final String HCAT_KEY_BASE = "mapreduce.lib.hcat";
 
-  public static final String HCAT_KEY_OUTPUT_SCHEMA = HCAT_KEY_BASE + ".output.schema";
+    public static final String HCAT_KEY_OUTPUT_SCHEMA = HCAT_KEY_BASE + ".output.schema";
 
-  public static final String HCAT_KEY_JOB_INFO =  HCAT_KEY_BASE + ".job.info";
+    public static final String HCAT_KEY_JOB_INFO = HCAT_KEY_BASE + ".job.info";
 
-  // hcatalog specific configurations, that can be put in hive-site.xml
-  public static final String HCAT_HIVE_CLIENT_EXPIRY_TIME = "hcatalog.hive.client.cache.expiry.time";
+    // hcatalog specific configurations, that can be put in hive-site.xml
+    public static final String HCAT_HIVE_CLIENT_EXPIRY_TIME = "hcatalog.hive.client.cache.expiry.time";
 
     private HCatConstants() { // restrict instantiation
-  }
+    }
 
-  public static final String HCAT_TABLE_SCHEMA = "hcat.table.schema";
+    public static final String HCAT_TABLE_SCHEMA = "hcat.table.schema";
 
-  public static final String HCAT_METASTORE_URI = HiveConf.ConfVars.METASTOREURIS.varname;
+    public static final String HCAT_METASTORE_URI = HiveConf.ConfVars.METASTOREURIS.varname;
 
-  public static final String HCAT_PERMS = "hcat.perms";
+    public static final String HCAT_PERMS = "hcat.perms";
 
-  public static final String HCAT_GROUP = "hcat.group";
+    public static final String HCAT_GROUP = "hcat.group";
 
-  public static final String HCAT_CREATE_TBL_NAME = "hcat.create.tbl.name";
+    public static final String HCAT_CREATE_TBL_NAME = "hcat.create.tbl.name";
 
-  public static final String HCAT_CREATE_DB_NAME = "hcat.create.db.name";
+    public static final String HCAT_CREATE_DB_NAME = "hcat.create.db.name";
 
-  public static final String HCAT_METASTORE_PRINCIPAL
-          = HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname;
+    public static final String HCAT_METASTORE_PRINCIPAL
+        = HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname;
 
-  // IMPORTANT IMPORTANT IMPORTANT!!!!!
-  //The keys used to store info into the job Configuration.
-  //If any new keys are added, the HCatStorer needs to be updated. The HCatStorer
-  //updates the job configuration in the backend to insert these keys to avoid
-  //having to call setOutput from the backend (which would cause a metastore call
-  //from the map jobs)
-  public static final String HCAT_KEY_OUTPUT_BASE = "mapreduce.lib.hcatoutput";
-  public static final String HCAT_KEY_OUTPUT_INFO = HCAT_KEY_OUTPUT_BASE + ".info";
-  public static final String HCAT_KEY_HIVE_CONF = HCAT_KEY_OUTPUT_BASE + ".hive.conf";
-  public static final String HCAT_KEY_TOKEN_SIGNATURE = HCAT_KEY_OUTPUT_BASE + ".token.sig";
+    // IMPORTANT IMPORTANT IMPORTANT!!!!!
+    //The keys used to store info into the job Configuration.
+    //If any new keys are added, the HCatStorer needs to be updated. The HCatStorer
+    //updates the job configuration in the backend to insert these keys to avoid
+    //having to call setOutput from the backend (which would cause a metastore call
+    //from the map jobs)
+    public static final String HCAT_KEY_OUTPUT_BASE = "mapreduce.lib.hcatoutput";
+    public static final String HCAT_KEY_OUTPUT_INFO = HCAT_KEY_OUTPUT_BASE + ".info";
+    public static final String HCAT_KEY_HIVE_CONF = HCAT_KEY_OUTPUT_BASE + ".hive.conf";
+    public static final String HCAT_KEY_TOKEN_SIGNATURE = HCAT_KEY_OUTPUT_BASE + ".token.sig";
 
-  public static final String[] OUTPUT_CONFS_TO_SAVE = {
-    HCAT_KEY_OUTPUT_INFO,
-    HCAT_KEY_HIVE_CONF,
-    HCAT_KEY_TOKEN_SIGNATURE
+    public static final String[] OUTPUT_CONFS_TO_SAVE = {
+        HCAT_KEY_OUTPUT_INFO,
+        HCAT_KEY_HIVE_CONF,
+        HCAT_KEY_TOKEN_SIGNATURE
     };
 
 
-  public static final String HCAT_MSG_CLEAN_FREQ = "hcat.msg.clean.freq";
-  public static final String HCAT_MSG_EXPIRY_DURATION = "hcat.msg.expiry.duration";
+    public static final String HCAT_MSG_CLEAN_FREQ = "hcat.msg.clean.freq";
+    public static final String HCAT_MSG_EXPIRY_DURATION = "hcat.msg.expiry.duration";
 
-  public static final String HCAT_MSGBUS_TOPIC_NAME = "hcat.msgbus.topic.name";
-  public static final String HCAT_MSGBUS_TOPIC_NAMING_POLICY = "hcat.msgbus.topic.naming.policy";
-  public static final String HCAT_MSGBUS_TOPIC_PREFIX = "hcat.msgbus.topic.prefix";
+    public static final String HCAT_MSGBUS_TOPIC_NAME = "hcat.msgbus.topic.name";
+    public static final String HCAT_MSGBUS_TOPIC_NAMING_POLICY = "hcat.msgbus.topic.naming.policy";
+    public static final String HCAT_MSGBUS_TOPIC_PREFIX = "hcat.msgbus.topic.prefix";
 
-  public static final String HCAT_DYNAMIC_PTN_JOBID = HCAT_KEY_OUTPUT_BASE + "dynamic.jobid";
-  public static final boolean HCAT_IS_DYNAMIC_MAX_PTN_CHECK_ENABLED = false;
+    public static final String HCAT_DYNAMIC_PTN_JOBID = HCAT_KEY_OUTPUT_BASE + "dynamic.jobid";
+    public static final boolean HCAT_IS_DYNAMIC_MAX_PTN_CHECK_ENABLED = false;
 
-  // Message Bus related properties.
-  public static final String HCAT_DEFAULT_TOPIC_PREFIX = "hcat";
-  public static final String HCAT_EVENT = "HCAT_EVENT";
-  public static final String HCAT_ADD_PARTITION_EVENT = "HCAT_ADD_PARTITION";
-  public static final String HCAT_DROP_PARTITION_EVENT = "HCAT_DROP_PARTITION";
-  public static final String HCAT_PARTITION_DONE_EVENT = "HCAT_PARTITION_DONE";
-  public static final String HCAT_ADD_TABLE_EVENT = "HCAT_ADD_TABLE";
-  public static final String HCAT_DROP_TABLE_EVENT = "HCAT_DROP_TABLE";
-  public static final String HCAT_ADD_DATABASE_EVENT = "HCAT_ADD_DATABASE";
-  public static final String HCAT_DROP_DATABASE_EVENT = "HCAT_DROP_DATABASE";
+    // Message Bus related properties.
+    public static final String HCAT_DEFAULT_TOPIC_PREFIX = "hcat";
+    public static final String HCAT_EVENT = "HCAT_EVENT";
+    public static final String HCAT_ADD_PARTITION_EVENT = "HCAT_ADD_PARTITION";
+    public static final String HCAT_DROP_PARTITION_EVENT = "HCAT_DROP_PARTITION";
+    public static final String HCAT_PARTITION_DONE_EVENT = "HCAT_PARTITION_DONE";
+    public static final String HCAT_ADD_TABLE_EVENT = "HCAT_ADD_TABLE";
+    public static final String HCAT_DROP_TABLE_EVENT = "HCAT_DROP_TABLE";
+    public static final String HCAT_ADD_DATABASE_EVENT = "HCAT_ADD_DATABASE";
+    public static final String HCAT_DROP_DATABASE_EVENT = "HCAT_DROP_DATABASE";
 
-  // System environment variables
-  public static final String SYSENV_HADOOP_TOKEN_FILE_LOCATION = "HADOOP_TOKEN_FILE_LOCATION";
+    // System environment variables
+    public static final String SYSENV_HADOOP_TOKEN_FILE_LOCATION = "HADOOP_TOKEN_FILE_LOCATION";
 
-  // Hadoop Conf Var Names
-  public static final String CONF_MAPREDUCE_JOB_CREDENTIALS_BINARY = "mapreduce.job.credentials.binary";
+    // Hadoop Conf Var Names
+    public static final String CONF_MAPREDUCE_JOB_CREDENTIALS_BINARY = "mapreduce.job.credentials.binary";
 
-  //***************************************************************************
-  // Data-related configuration properties.
-  //***************************************************************************
+    //***************************************************************************
+    // Data-related configuration properties.
+    //***************************************************************************
 
-  /**
-   * {@value} (default: {@value #HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT}).
-   * Pig < 0.10.0 does not have boolean support, and scripts written for pre-boolean Pig versions
-   * will not expect boolean values when upgrading Pig. For integration the option is offered to
-   * convert boolean fields to integers by setting this Hadoop configuration key.
-   */
-  public static final String HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER =
-      "hcat.data.convert.boolean.to.integer";
-  public static final boolean HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT = false;
+    /**
+     * {@value} (default: {@value #HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT}).
+     * Pig < 0.10.0 does not have boolean support, and scripts written for pre-boolean Pig versions
+     * will not expect boolean values when upgrading Pig. To ease integration, this Hadoop configuration
+     * key offers the option to convert boolean fields to integers.
+     */
+    public static final String HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER =
+        "hcat.data.convert.boolean.to.integer";
+    public static final boolean HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT = false;
 
-  /**
-   * {@value} (default: {@value #HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT}).
-   * Hive tables support tinyint and smallint columns, while not all processing frameworks support
-   * these types (Pig only has integer for example). Enable this property to promote tinyint and
-   * smallint columns to integer at runtime. Note that writes to tinyint and smallint columns
-   * enforce bounds checking and jobs will fail if attempting to write values outside the column
-   * bounds.
-   */
-  public static final String HCAT_DATA_TINY_SMALL_INT_PROMOTION =
-      "hcat.data.tiny.small.int.promotion";
-  public static final boolean HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT = false;
+    /**
+     * {@value} (default: {@value #HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT}).
+     * Hive tables support tinyint and smallint columns, while not all processing frameworks support
+     * these types (Pig only has integer for example). Enable this property to promote tinyint and
+     * smallint columns to integer at runtime. Note that writes to tinyint and smallint columns
+     * enforce bounds checking and jobs will fail if attempting to write values outside the column
+     * bounds.
+     */
+    public static final String HCAT_DATA_TINY_SMALL_INT_PROMOTION =
+        "hcat.data.tiny.small.int.promotion";
+    public static final boolean HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT = false;
 
-  /**
-   * {@value} (default: {@value #HCAT_INPUT_BAD_RECORD_THRESHOLD_DEFAULT}).
-   * Threshold for the ratio of bad records that will be silently skipped without causing a task
-   * failure. This is useful when processing large data sets with corrupt records, when its
-   * acceptable to skip some bad records.
-   */
-  public static final String HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY = "hcat.input.bad.record.threshold";
-  public static final float HCAT_INPUT_BAD_RECORD_THRESHOLD_DEFAULT = 0.0001f;
+    /**
+     * {@value} (default: {@value #HCAT_INPUT_BAD_RECORD_THRESHOLD_DEFAULT}).
+     * Threshold for the ratio of bad records that will be silently skipped without causing a task
+     * failure. This is useful when processing large data sets with corrupt records, when it is
+     * acceptable to skip some bad records.
+     */
+    public static final String HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY = "hcat.input.bad.record.threshold";
+    public static final float HCAT_INPUT_BAD_RECORD_THRESHOLD_DEFAULT = 0.0001f;
 
-  /**
-   * {@value} (default: {@value #HCAT_INPUT_BAD_RECORD_MIN_DEFAULT}).
-   * Number of bad records that will be accepted before applying
-   * {@value #HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY}. This is necessary to prevent an initial bad
-   * record from causing a task failure.
-   */
-  public static final String HCAT_INPUT_BAD_RECORD_MIN_KEY = "hcat.input.bad.record.min";
-  public static final int HCAT_INPUT_BAD_RECORD_MIN_DEFAULT = 2;
+    /**
+     * {@value} (default: {@value #HCAT_INPUT_BAD_RECORD_MIN_DEFAULT}).
+     * Number of bad records that will be accepted before applying
+     * {@value #HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY}. This is necessary to prevent an initial bad
+     * record from causing a task failure.
+     */
+    public static final String HCAT_INPUT_BAD_RECORD_MIN_KEY = "hcat.input.bad.record.min";
+    public static final int HCAT_INPUT_BAD_RECORD_MIN_DEFAULT = 2;
 }
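
As a usage sketch of the data-related keys documented above (the class and method names below are illustrative, not part of HCatalog), a job would set them as ordinary Hadoop configuration properties before submission:

import org.apache.hadoop.conf.Configuration;
import org.apache.hcatalog.common.HCatConstants;

public class HCatConfExample {
    public static Configuration configure(Configuration conf) {
        // Promote boolean columns to integers for scripts written against pre-boolean Pig.
        conf.setBoolean(HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER, true);
        // Silently skip up to 0.01% bad input records, but only after the first 2 are seen.
        conf.setFloat(HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY, 0.0001f);
        conf.setInt(HCatConstants.HCAT_INPUT_BAD_RECORD_MIN_KEY, 2);
        return conf;
    }
}
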
diff --git a/src/java/org/apache/hcatalog/common/HCatContext.java b/src/java/org/apache/hcatalog/common/HCatContext.java
index ac357ae..f19489c 100644
--- a/src/java/org/apache/hcatalog/common/HCatContext.java
+++ b/src/java/org/apache/hcatalog/common/HCatContext.java
@@ -27,28 +27,28 @@
  */
 public class HCatContext {
 
-  private static final HCatContext hCatContext = new HCatContext();
+    private static final HCatContext hCatContext = new HCatContext();
 
-  private final Configuration conf;
+    private final Configuration conf;
 
-  private HCatContext() {
-    conf = new Configuration();
-  }
-
-  public static HCatContext getInstance() {
-    return hCatContext;
-  }
-
-  public Configuration getConf() {
-    return conf;
-  }
-
-  /**
-   * Merge the given configuration into the HCatContext conf, overwriting any existing keys.
-   */
-  public void mergeConf(Configuration conf) {
-    for (Map.Entry<String, String> entry : conf) {
-      this.conf.set(entry.getKey(), entry.getValue());
+    private HCatContext() {
+        conf = new Configuration();
     }
-  }
+
+    public static HCatContext getInstance() {
+        return hCatContext;
+    }
+
+    public Configuration getConf() {
+        return conf;
+    }
+
+    /**
+     * Merge the given configuration into the HCatContext conf, overwriting any existing keys.
+     */
+    public void mergeConf(Configuration conf) {
+        for (Map.Entry<String, String> entry : conf) {
+            this.conf.set(entry.getKey(), entry.getValue());
+        }
+    }
 }
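
A brief usage sketch of the singleton above (class name and property key here are illustrative): callers merge their job configuration into the shared HCatContext, overwriting any existing keys, and read settings back through getConf().

import org.apache.hadoop.conf.Configuration;
import org.apache.hcatalog.common.HCatContext;

public class HCatContextExample {
    public static void main(String[] args) {
        Configuration jobConf = new Configuration();
        jobConf.set("hcat.data.convert.boolean.to.integer", "true");

        // Merge job-level settings into the process-wide context; existing keys are overwritten.
        HCatContext.getInstance().mergeConf(jobConf);

        // Later, any component can read the merged value back.
        System.out.println(HCatContext.getInstance().getConf()
            .get("hcat.data.convert.boolean.to.integer"));
    }
}
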
diff --git a/src/java/org/apache/hcatalog/common/HCatException.java b/src/java/org/apache/hcatalog/common/HCatException.java
index deb2e1f..ed51c60 100644
--- a/src/java/org/apache/hcatalog/common/HCatException.java
+++ b/src/java/org/apache/hcatalog/common/HCatException.java
@@ -24,134 +24,135 @@
  */
 public class HCatException extends IOException {
 
-  private static final long serialVersionUID = 1L;
+    private static final long serialVersionUID = 1L;
 
-  /** The error type enum for this exception. */
-  private final ErrorType errorType;
+    /** The error type enum for this exception. */
+    private final ErrorType errorType;
 
-  /**
-   * Instantiates a new hcat exception.
-   * @param errorType the error type
-   */
-  public HCatException(ErrorType errorType) {
-    this(errorType, null, null);
-  }
-
-
-  /**
-   * Instantiates a new hcat exception.
-   * @param errorType the error type
-   * @param cause the cause
-   */
-  public HCatException(ErrorType errorType, Throwable cause) {
-    this(errorType, null, cause);
-  }
-
-  /**
-   * Instantiates a new hcat exception.
-   * @param errorType the error type
-   * @param extraMessage extra messages to add to the message string
-   */
-  public HCatException(ErrorType errorType, String extraMessage) {
-    this(errorType, extraMessage, null);
-  }
-
-  /**
-   * Instantiates a new hcat exception.
-   * @param errorType the error type
-   * @param extraMessage extra messages to add to the message string
-   * @param cause the cause
-   */
-  public HCatException(ErrorType errorType, String extraMessage, Throwable cause) {
-    super(buildErrorMessage(
-        errorType,
-        extraMessage,
-        cause), cause);
-    this.errorType = errorType;
-  }
-
-
-  //TODO : remove default error type constructors after all exceptions
-  //are changed to use error types
-  /**
-   * Instantiates a new hcat exception.
-   * @param message the error message
-   */
-  public HCatException(String message) {
-    this(ErrorType.ERROR_INTERNAL_EXCEPTION, message, null);
-  }
-
-  /**
-   * Instantiates a new hcat exception.
-   * @param message the error message
-   * @param cause the cause
-   */
-  public HCatException(String message, Throwable cause) {
-    this(ErrorType.ERROR_INTERNAL_EXCEPTION, message, cause);
-  }
-
-
-  /**
-   * Builds the error message string. The error type message is appended with the extra message. If appendCause
-   * is true for the error type, then the message of the cause also is added to the message.
-   * @param type the error type
-   * @param extraMessage the extra message string
-   * @param cause the cause for the exception
-   * @return the exception message string
-   */
-  public static String buildErrorMessage(ErrorType type, String extraMessage, Throwable cause) {
-
-    //Initial message is just the error type message
-    StringBuffer message = new StringBuffer(HCatException.class.getName());
-    message.append(" : " + type.getErrorCode());
-    message.append(" : " + type.getErrorMessage());
-
-    if( extraMessage != null ) {
-      //Add the extra message value to buffer
-      message.append(" : " + extraMessage);
+    /**
+     * Instantiates a new hcat exception.
+     * @param errorType the error type
+     */
+    public HCatException(ErrorType errorType) {
+        this(errorType, null, null);
     }
 
-    if( type.appendCauseMessage() ) {
-      if( cause != null ) {
-        //Add the cause message to buffer
-        message.append(". Cause : " + cause.toString());
-      }
+
+    /**
+     * Instantiates a new hcat exception.
+     * @param errorType the error type
+     * @param cause the cause
+     */
+    public HCatException(ErrorType errorType, Throwable cause) {
+        this(errorType, null, cause);
     }
 
-    return message.toString();
-  }
+    /**
+     * Instantiates a new hcat exception.
+     * @param errorType the error type
+     * @param extraMessage extra messages to add to the message string
+     */
+    public HCatException(ErrorType errorType, String extraMessage) {
+        this(errorType, extraMessage, null);
+    }
+
+    /**
+     * Instantiates a new hcat exception.
+     * @param errorType the error type
+     * @param extraMessage extra messages to add to the message string
+     * @param cause the cause
+     */
+    public HCatException(ErrorType errorType, String extraMessage, Throwable cause) {
+        super(buildErrorMessage(
+            errorType,
+            extraMessage,
+            cause), cause);
+        this.errorType = errorType;
+    }
 
 
-  /**
-   * Is this a retriable error.
-   * @return is it retriable
-   */
-  public boolean isRetriable() {
-    return errorType.isRetriable();
-  }
+    //TODO : remove default error type constructors after all exceptions
+    //are changed to use error types
 
-  /**
-   * Gets the error type.
-   * @return the error type enum
-   */
-  public ErrorType getErrorType() {
-    return errorType;
-  }
+    /**
+     * Instantiates a new hcat exception.
+     * @param message the error message
+     */
+    public HCatException(String message) {
+        this(ErrorType.ERROR_INTERNAL_EXCEPTION, message, null);
+    }
 
-  /**
-   * Gets the error code.
-   * @return the error code
-   */
-  public int getErrorCode() {
-    return errorType.getErrorCode();
-  }
+    /**
+     * Instantiates a new hcat exception.
+     * @param message the error message
+     * @param cause the cause
+     */
+    public HCatException(String message, Throwable cause) {
+        this(ErrorType.ERROR_INTERNAL_EXCEPTION, message, cause);
+    }
 
-  /* (non-Javadoc)
-   * @see java.lang.Throwable#toString()
-   */
-  @Override
-  public String toString() {
-    return getMessage();
-  }
+
+    /**
+     * Builds the error message string. The extra message is appended to the error type message. If appendCause
+     * is true for the error type, then the cause's message is also appended.
+     * @param type the error type
+     * @param extraMessage the extra message string
+     * @param cause the cause for the exception
+     * @return the exception message string
+     */
+    public static String buildErrorMessage(ErrorType type, String extraMessage, Throwable cause) {
+
+        //Initial message is just the error type message
+        StringBuffer message = new StringBuffer(HCatException.class.getName());
+        message.append(" : " + type.getErrorCode());
+        message.append(" : " + type.getErrorMessage());
+
+        if (extraMessage != null) {
+            //Add the extra message value to buffer
+            message.append(" : " + extraMessage);
+        }
+
+        if (type.appendCauseMessage()) {
+            if (cause != null) {
+                //Add the cause message to buffer
+                message.append(". Cause : " + cause.toString());
+            }
+        }
+
+        return message.toString();
+    }
+
+
+    /**
+     * Is this a retriable error.
+     * @return is it retriable
+     */
+    public boolean isRetriable() {
+        return errorType.isRetriable();
+    }
+
+    /**
+     * Gets the error type.
+     * @return the error type enum
+     */
+    public ErrorType getErrorType() {
+        return errorType;
+    }
+
+    /**
+     * Gets the error code.
+     * @return the error code
+     */
+    public int getErrorCode() {
+        return errorType.getErrorCode();
+    }
+
+    /* (non-Javadoc)
+     * @see java.lang.Throwable#toString()
+     */
+    @Override
+    public String toString() {
+        return getMessage();
+    }
 
 }
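
A small sketch of how the constructors above compose a message (ErrorType is assumed to live in the same org.apache.hcatalog.common package; the wrapper class below is illustrative):

import org.apache.hcatalog.common.ErrorType;
import org.apache.hcatalog.common.HCatException;

public class HCatExceptionExample {
    public static void main(String[] args) {
        HCatException e = new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION,
            "extra detail", new RuntimeException("root cause"));

        // buildErrorMessage() concatenates the class name, error code, error type message,
        // the extra detail, and (if the type requests it) the cause's toString().
        System.out.println(e);               // toString() delegates to getMessage()
        System.out.println(e.getErrorCode());
        System.out.println(e.isRetriable());
    }
}
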
diff --git a/src/java/org/apache/hcatalog/common/HCatUtil.java b/src/java/org/apache/hcatalog/common/HCatUtil.java
index bee4210..10446e1 100644
--- a/src/java/org/apache/hcatalog/common/HCatUtil.java
+++ b/src/java/org/apache/hcatalog/common/HCatUtil.java
@@ -75,7 +75,7 @@
 
     private static final Logger LOG = LoggerFactory.getLogger(HCatUtil.class);
     private static volatile HiveClientCache hiveClientCache;
-    private final static int DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS = 2*60;
+    private final static int DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS = 2 * 60;
 
     public static boolean checkJobContextIfRunningFromBackend(JobContext j) {
         if (j.getConfiguration().get("mapred.task.id", "").equals("")) {
@@ -105,7 +105,7 @@
         }
         try {
             ByteArrayInputStream serialObj = new ByteArrayInputStream(
-                    decodeBytes(str));
+                decodeBytes(str));
             ObjectInputStream objStream = new ObjectInputStream(serialObj);
             return objStream.readObject();
         } catch (Exception e) {
@@ -136,9 +136,9 @@
     }
 
     public static List<HCatFieldSchema> getHCatFieldSchemaList(
-            FieldSchema... fields) throws HCatException {
+        FieldSchema... fields) throws HCatException {
         List<HCatFieldSchema> result = new ArrayList<HCatFieldSchema>(
-                fields.length);
+            fields.length);
 
         for (FieldSchema f : fields) {
             result.add(HCatSchemaUtils.getHCatFieldSchema(f));
@@ -148,7 +148,7 @@
     }
 
     public static List<HCatFieldSchema> getHCatFieldSchemaList(
-            List<FieldSchema> fields) throws HCatException {
+        List<FieldSchema> fields) throws HCatException {
         if (fields == null) {
             return null;
         } else {
@@ -169,7 +169,7 @@
     }
 
     public static List<FieldSchema> getFieldSchemaList(
-            List<HCatFieldSchema> hcatFields) {
+        List<HCatFieldSchema> hcatFields) {
         if (hcatFields == null) {
             return null;
         } else {
@@ -183,7 +183,7 @@
 
     public static Table getTable(HiveMetaStoreClient client, String dbName, String tableName)
         throws NoSuchObjectException, TException, MetaException {
-      return new Table(client.getTable(dbName, tableName));
+        return new Table(client.getTable(dbName, tableName));
     }
 
     public static HCatSchema getTableSchemaWithPtnCols(Table table) throws IOException {
@@ -229,7 +229,7 @@
      * @throws IOException Signals that an I/O exception has occurred.
      */
     public static List<FieldSchema> validatePartitionSchema(Table table,
-            HCatSchema partitionSchema) throws IOException {
+                                                            HCatSchema partitionSchema) throws IOException {
         Map<String, FieldSchema> partitionKeyMap = new HashMap<String, FieldSchema>();
 
         for (FieldSchema field : table.getPartitionKeys()) {
@@ -242,7 +242,7 @@
         for (int i = 0; i < partitionSchema.getFields().size(); i++) {
 
             FieldSchema field = HCatSchemaUtils.getFieldSchema(partitionSchema
-                    .getFields().get(i));
+                .getFields().get(i));
 
             FieldSchema tableField;
             if (i < tableCols.size()) {
@@ -250,19 +250,19 @@
 
                 if (!tableField.getName().equalsIgnoreCase(field.getName())) {
                     throw new HCatException(
-                            ErrorType.ERROR_SCHEMA_COLUMN_MISMATCH,
-                            "Expected column <" + tableField.getName()
-                                    + "> at position " + (i + 1)
-                                    + ", found column <" + field.getName()
-                                    + ">");
+                        ErrorType.ERROR_SCHEMA_COLUMN_MISMATCH,
+                        "Expected column <" + tableField.getName()
+                            + "> at position " + (i + 1)
+                            + ", found column <" + field.getName()
+                            + ">");
                 }
             } else {
                 tableField = partitionKeyMap.get(field.getName().toLowerCase());
 
                 if (tableField != null) {
                     throw new HCatException(
-                            ErrorType.ERROR_SCHEMA_PARTITION_KEY, "Key <"
-                                    + field.getName() + ">");
+                        ErrorType.ERROR_SCHEMA_PARTITION_KEY, "Key <"
+                        + field.getName() + ">");
                 }
             }
 
@@ -272,16 +272,16 @@
             } else {
                 // field present in both. validate type has not changed
                 TypeInfo partitionType = TypeInfoUtils
-                        .getTypeInfoFromTypeString(field.getType());
+                    .getTypeInfoFromTypeString(field.getType());
                 TypeInfo tableType = TypeInfoUtils
-                        .getTypeInfoFromTypeString(tableField.getType());
+                    .getTypeInfoFromTypeString(tableField.getType());
 
                 if (!partitionType.equals(tableType)) {
                     throw new HCatException(
-                            ErrorType.ERROR_SCHEMA_TYPE_MISMATCH, "Column <"
-                                    + field.getName() + ">, expected <"
-                                    + tableType.getTypeName() + ">, got <"
-                                    + partitionType.getTypeName() + ">");
+                        ErrorType.ERROR_SCHEMA_TYPE_MISMATCH, "Column <"
+                        + field.getName() + ">, expected <"
+                        + tableType.getTypeName() + ">, got <"
+                        + partitionType.getTypeName() + ">");
                 }
             }
         }
@@ -304,16 +304,16 @@
      */
     public static boolean validateMorePermissive(FsAction first, FsAction second) {
         if ((first == FsAction.ALL) || (second == FsAction.NONE)
-                || (first == second)) {
+            || (first == second)) {
             return true;
         }
         switch (first) {
-            case READ_EXECUTE:
-                return ((second == FsAction.READ) || (second == FsAction.EXECUTE));
-            case READ_WRITE:
-                return ((second == FsAction.READ) || (second == FsAction.WRITE));
-            case WRITE_EXECUTE:
-                return ((second == FsAction.WRITE) || (second == FsAction.EXECUTE));
+        case READ_EXECUTE:
+            return ((second == FsAction.READ) || (second == FsAction.EXECUTE));
+        case READ_WRITE:
+            return ((second == FsAction.READ) || (second == FsAction.WRITE));
+        case WRITE_EXECUTE:
+            return ((second == FsAction.WRITE) || (second == FsAction.EXECUTE));
         }
         return false;
     }
@@ -329,18 +329,18 @@
      */
     public static boolean validateExecuteBitPresentIfReadOrWrite(FsAction perms) {
         if ((perms == FsAction.READ) || (perms == FsAction.WRITE)
-                || (perms == FsAction.READ_WRITE)) {
+            || (perms == FsAction.READ_WRITE)) {
             return false;
         }
         return true;
     }
 
     public static Token<org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier> getJobTrackerDelegationToken(
-            Configuration conf, String userName) throws Exception {
+        Configuration conf, String userName) throws Exception {
         // LOG.info("getJobTrackerDelegationToken("+conf+","+userName+")");
         JobClient jcl = new JobClient(new JobConf(conf, HCatOutputFormat.class));
         Token<org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier> t = jcl
-                .getDelegationToken(new Text(userName));
+            .getDelegationToken(new Text(userName));
         // LOG.info("got "+t);
         return t;
 
@@ -348,8 +348,8 @@
     }
 
     public static Token<? extends AbstractDelegationTokenIdentifier> extractThriftToken(
-            String tokenStrForm, String tokenSignature) throws MetaException,
-            TException, IOException {
+        String tokenStrForm, String tokenSignature) throws MetaException,
+        TException, IOException {
         // LOG.info("extractThriftToken("+tokenStrForm+","+tokenSignature+")");
         Token<? extends AbstractDelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>();
         t.decodeFromUrlString(tokenStrForm);
@@ -369,19 +369,19 @@
      */
     public static HCatStorageHandler getStorageHandler(Configuration conf, StorerInfo storerInfo) throws IOException {
         return getStorageHandler(conf,
-                                 storerInfo.getStorageHandlerClass(),
-                                 storerInfo.getSerdeClass(),
-                                 storerInfo.getIfClass(),
-                                 storerInfo.getOfClass());
+            storerInfo.getStorageHandlerClass(),
+            storerInfo.getSerdeClass(),
+            storerInfo.getIfClass(),
+            storerInfo.getOfClass());
     }
 
     public static HCatStorageHandler getStorageHandler(Configuration conf, PartInfo partitionInfo) throws IOException {
-      return HCatUtil.getStorageHandler(
-          conf,
-          partitionInfo.getStorageHandlerClassName(),
-          partitionInfo.getSerdeClassName(),
-          partitionInfo.getInputFormatClassName(),
-          partitionInfo.getOutputFormatClassName());
+        return HCatUtil.getStorageHandler(
+            conf,
+            partitionInfo.getStorageHandlerClassName(),
+            partitionInfo.getSerdeClassName(),
+            partitionInfo.getInputFormatClassName(),
+            partitionInfo.getOutputFormatClassName());
     }
 
     /**
@@ -401,9 +401,9 @@
                                                        String serDe,
                                                        String inputFormat,
                                                        String outputFormat)
-    throws IOException {
+        throws IOException {
 
-        if ((storageHandler == null) || (storageHandler.equals(FosterStorageHandler.class.getName()))){
+        if ((storageHandler == null) || (storageHandler.equals(FosterStorageHandler.class.getName()))) {
             try {
                 FosterStorageHandler fosterStorageHandler =
                     new FosterStorageHandler(inputFormat, outputFormat, serDe);
@@ -411,57 +411,57 @@
                 return fosterStorageHandler;
             } catch (ClassNotFoundException e) {
                 throw new IOException("Failed to load "
-                    + "foster storage handler",e);
+                    + "foster storage handler", e);
             }
         }
 
         try {
             Class<? extends HCatStorageHandler> handlerClass =
-                        (Class<? extends HCatStorageHandler>) Class
+                (Class<? extends HCatStorageHandler>) Class
                     .forName(storageHandler, true, JavaUtils.getClassLoader());
-            return (HCatStorageHandler)ReflectionUtils.newInstance(
-                                                            handlerClass, conf);
+            return (HCatStorageHandler) ReflectionUtils.newInstance(
+                handlerClass, conf);
         } catch (ClassNotFoundException e) {
             throw new IOException("Error in loading storage handler."
-                    + e.getMessage(), e);
+                + e.getMessage(), e);
         }
     }
 
-    public static Pair<String,String> getDbAndTableName(String tableName) throws IOException{
-      String[] dbTableNametokens = tableName.split("\\.");
-      if(dbTableNametokens.length == 1) {
-        return new Pair<String,String>(MetaStoreUtils.DEFAULT_DATABASE_NAME,tableName);
-      }else if (dbTableNametokens.length == 2) {
-        return new Pair<String, String>(dbTableNametokens[0], dbTableNametokens[1]);
-      }else{
-        throw new IOException("tableName expected in the form "
-            +"<databasename>.<table name> or <table name>. Got " + tableName);
-      }
+    public static Pair<String, String> getDbAndTableName(String tableName) throws IOException {
+        String[] dbTableNametokens = tableName.split("\\.");
+        if (dbTableNametokens.length == 1) {
+            return new Pair<String, String>(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+        } else if (dbTableNametokens.length == 2) {
+            return new Pair<String, String>(dbTableNametokens[0], dbTableNametokens[1]);
+        } else {
+            throw new IOException("tableName expected in the form "
+                + "<databasename>.<table name> or <table name>. Got " + tableName);
+        }
     }
 
     public static Map<String, String>
-      getInputJobProperties(HCatStorageHandler storageHandler,
-                            InputJobInfo inputJobInfo) {
+    getInputJobProperties(HCatStorageHandler storageHandler,
+                          InputJobInfo inputJobInfo) {
         TableDesc tableDesc = new TableDesc(storageHandler.getSerDeClass(),
-                  storageHandler.getInputFormatClass(),
-                  storageHandler.getOutputFormatClass(),
-                  inputJobInfo.getTableInfo().getStorerInfo().getProperties());
-        if(tableDesc.getJobProperties() == null) {
+            storageHandler.getInputFormatClass(),
+            storageHandler.getOutputFormatClass(),
+            inputJobInfo.getTableInfo().getStorerInfo().getProperties());
+        if (tableDesc.getJobProperties() == null) {
             tableDesc.setJobProperties(new HashMap<String, String>());
         }
 
-        Map<String,String> jobProperties = new HashMap<String,String>();
+        Map<String, String> jobProperties = new HashMap<String, String>();
         try {
             tableDesc.getJobProperties().put(
                 HCatConstants.HCAT_KEY_JOB_INFO,
                 HCatUtil.serialize(inputJobInfo));
 
             storageHandler.configureInputJobProperties(tableDesc,
-                                                                jobProperties);
+                jobProperties);
 
         } catch (IOException e) {
             throw new IllegalStateException(
-                "Failed to configure StorageHandler",e);
+                "Failed to configure StorageHandler", e);
         }
 
         return jobProperties;
@@ -469,36 +469,36 @@
 
 
     public static void
-      configureOutputStorageHandler(HCatStorageHandler storageHandler,
-                                    JobContext context,
-                                    OutputJobInfo outputJobInfo) {
+    configureOutputStorageHandler(HCatStorageHandler storageHandler,
+                                  JobContext context,
+                                  OutputJobInfo outputJobInfo) {
         //TODO replace IgnoreKeyTextOutputFormat with a
         //HiveOutputFormatWrapper in StorageHandler
         TableDesc tableDesc = new TableDesc(storageHandler.getSerDeClass(),
-                  storageHandler.getInputFormatClass(),
-                  IgnoreKeyTextOutputFormat.class,
-                  outputJobInfo.getTableInfo().getStorerInfo().getProperties());
-        if(tableDesc.getJobProperties() == null)
+            storageHandler.getInputFormatClass(),
+            IgnoreKeyTextOutputFormat.class,
+            outputJobInfo.getTableInfo().getStorerInfo().getProperties());
+        if (tableDesc.getJobProperties() == null)
             tableDesc.setJobProperties(new HashMap<String, String>());
-        for (Map.Entry<String,String> el: context.getConfiguration()) {
-           tableDesc.getJobProperties().put(el.getKey(),el.getValue());
+        for (Map.Entry<String, String> el : context.getConfiguration()) {
+            tableDesc.getJobProperties().put(el.getKey(), el.getValue());
         }
 
-        Map<String,String> jobProperties = new HashMap<String,String>();
+        Map<String, String> jobProperties = new HashMap<String, String>();
         try {
             tableDesc.getJobProperties().put(
                 HCatConstants.HCAT_KEY_OUTPUT_INFO,
                 HCatUtil.serialize(outputJobInfo));
 
             storageHandler.configureOutputJobProperties(tableDesc,
-                                                        jobProperties);
+                jobProperties);
 
-            for(Map.Entry<String,String> el: jobProperties.entrySet()) {
-                context.getConfiguration().set(el.getKey(),el.getValue());
+            for (Map.Entry<String, String> el : jobProperties.entrySet()) {
+                context.getConfiguration().set(el.getKey(), el.getValue());
             }
         } catch (IOException e) {
             throw new IllegalStateException(
-                "Failed to configure StorageHandler",e);
+                "Failed to configure StorageHandler", e);
         }
     }
 
@@ -509,8 +509,8 @@
      */
     public static void copyConf(Configuration src, Configuration dest) {
         dest.clear();
-        for(Map.Entry<String,String> el : src) {
-            dest.set(el.getKey(),el.getValue());
+        for (Map.Entry<String, String> el : src) {
+            dest.set(el.getKey(), el.getValue());
         }
     }
 
@@ -522,16 +522,16 @@
      * @throws IOException
      */
     public static HiveMetaStoreClient getHiveClient(HiveConf hiveConf)
-            throws MetaException, IOException {
+        throws MetaException, IOException {
 
         // Singleton behaviour: create the cache instance if required. The cache needs to be created lazily and
         // using the expiry time available in hiveConf.
 
-        if(hiveClientCache == null ) {
+        if (hiveClientCache == null) {
             synchronized (HiveMetaStoreClient.class) {
-                if(hiveClientCache == null) {
+                if (hiveClientCache == null) {
                     hiveClientCache = new HiveClientCache(hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_EXPIRY_TIME,
-                            DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS));
+                        DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS));
                 }
             }
         }
@@ -552,65 +552,63 @@
     }
 
     public static HiveConf getHiveConf(Configuration conf)
-      throws IOException {
+        throws IOException {
 
-      HiveConf hiveConf = new HiveConf(conf, HCatUtil.class);
+        HiveConf hiveConf = new HiveConf(conf, HCatUtil.class);
 
-      //copy the hive conf into the job conf and restore it
-      //in the backend context
-      if( conf.get(HCatConstants.HCAT_KEY_HIVE_CONF) == null ) {
-        conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
-            HCatUtil.serialize(hiveConf.getAllProperties()));
-      } else {
-        //Copy configuration properties into the hive conf
-        Properties properties = (Properties) HCatUtil.deserialize(
-            conf.get(HCatConstants.HCAT_KEY_HIVE_CONF));
+        //copy the hive conf into the job conf and restore it
+        //in the backend context
+        if (conf.get(HCatConstants.HCAT_KEY_HIVE_CONF) == null) {
+            conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
+                HCatUtil.serialize(hiveConf.getAllProperties()));
+        } else {
+            //Copy configuration properties into the hive conf
+            Properties properties = (Properties) HCatUtil.deserialize(
+                conf.get(HCatConstants.HCAT_KEY_HIVE_CONF));
 
-        for(Map.Entry<Object, Object> prop : properties.entrySet() ) {
-          if( prop.getValue() instanceof String ) {
-            hiveConf.set((String) prop.getKey(), (String) prop.getValue());
-          } else if( prop.getValue() instanceof Integer ) {
-            hiveConf.setInt((String) prop.getKey(),
-                (Integer) prop.getValue());
-          } else if( prop.getValue() instanceof Boolean ) {
-            hiveConf.setBoolean((String) prop.getKey(),
-                (Boolean) prop.getValue());
-          } else if( prop.getValue() instanceof Long ) {
-            hiveConf.setLong((String) prop.getKey(), (Long) prop.getValue());
-          } else if( prop.getValue() instanceof Float ) {
-            hiveConf.setFloat((String) prop.getKey(),
-                (Float) prop.getValue());
-          }
+            for (Map.Entry<Object, Object> prop : properties.entrySet()) {
+                if (prop.getValue() instanceof String) {
+                    hiveConf.set((String) prop.getKey(), (String) prop.getValue());
+                } else if (prop.getValue() instanceof Integer) {
+                    hiveConf.setInt((String) prop.getKey(),
+                        (Integer) prop.getValue());
+                } else if (prop.getValue() instanceof Boolean) {
+                    hiveConf.setBoolean((String) prop.getKey(),
+                        (Boolean) prop.getValue());
+                } else if (prop.getValue() instanceof Long) {
+                    hiveConf.setLong((String) prop.getKey(), (Long) prop.getValue());
+                } else if (prop.getValue() instanceof Float) {
+                    hiveConf.setFloat((String) prop.getKey(),
+                        (Float) prop.getValue());
+                }
+            }
         }
-      }
 
-      if(conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
-        hiveConf.set("hive.metastore.token.signature",
-                     conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE));
-      }
+        if (conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
+            hiveConf.set("hive.metastore.token.signature",
+                conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE));
+        }
 
-      return hiveConf;
+        return hiveConf;
     }
 
 
-    public static JobConf getJobConfFromContext(JobContext jobContext)
-    {
-      JobConf jobConf;
-      // we need to convert the jobContext into a jobConf
-      // 0.18 jobConf (Hive) vs 0.20+ jobContext (HCat)
-      // begin conversion..
-      jobConf = new JobConf(jobContext.getConfiguration());
-      // ..end of conversion
+    public static JobConf getJobConfFromContext(JobContext jobContext) {
+        JobConf jobConf;
+        // we need to convert the jobContext into a jobConf
+        // 0.18 jobConf (Hive) vs 0.20+ jobContext (HCat)
+        // begin conversion..
+        jobConf = new JobConf(jobContext.getConfiguration());
+        // ..end of conversion
 
 
-      return jobConf;
+        return jobConf;
     }
 
     public static void copyJobPropertiesToJobConf(
-                    Map<String, String>jobProperties, JobConf jobConf)
-    {
-      for (Map.Entry<String, String> entry : jobProperties.entrySet()) {
-        jobConf.set(entry.getKey(), entry.getValue());
-      }
+        Map<String, String> jobProperties, JobConf jobConf) {
+        for (Map.Entry<String, String> entry : jobProperties.entrySet()) {
+            jobConf.set(entry.getKey(), entry.getValue());
+        }
     }
 }
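
A sketch of the table-name parsing shown above (the Pair import path is an assumption based on HCatalog's data package; the class below is illustrative): names without a database prefix fall back to the default database, and more than one dot is rejected.

import java.io.IOException;

import org.apache.hcatalog.common.HCatUtil;
import org.apache.hcatalog.data.Pair;

public class DbTableNameExample {
    public static void main(String[] args) throws IOException {
        // Fully qualified name: split into ("sales", "orders").
        Pair<String, String> qualified = HCatUtil.getDbAndTableName("sales.orders");

        // Bare name: the database defaults to MetaStoreUtils.DEFAULT_DATABASE_NAME.
        Pair<String, String> bare = HCatUtil.getDbAndTableName("orders");

        try {
            HCatUtil.getDbAndTableName("a.b.c");
        } catch (IOException expected) {
            // More than one dot is rejected as a malformed table name.
        }
    }
}
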
diff --git a/src/java/org/apache/hcatalog/common/HiveClientCache.java b/src/java/org/apache/hcatalog/common/HiveClientCache.java
index e45ab3c..64dc690 100644
--- a/src/java/org/apache/hcatalog/common/HiveClientCache.java
+++ b/src/java/org/apache/hcatalog/common/HiveClientCache.java
@@ -57,11 +57,12 @@
     // Thread local variable containing each thread's unique ID, is used as one of the keys for the cache
     // causing each thread to get a different client even if the hiveConf is same.
     private static final ThreadLocal<Integer> threadId =
-            new ThreadLocal<Integer>() {
-                @Override protected Integer initialValue() {
-                    return nextId.getAndIncrement();
-                }
-            };
+        new ThreadLocal<Integer>() {
+            @Override
+            protected Integer initialValue() {
+                return nextId.getAndIncrement();
+            }
+        };
 
     private int getThreadId() {
         return threadId.get();
@@ -70,39 +71,39 @@
     /**
      * @param timeout the length of time in seconds after a client is created that it should be automatically removed
      */
-     public HiveClientCache(final int timeout) {
-         this.timeout = timeout;
-         RemovalListener<HiveClientCacheKey, CacheableHiveMetaStoreClient> removalListener =
-             new RemovalListener<HiveClientCacheKey, CacheableHiveMetaStoreClient>() {
-                 public void onRemoval(RemovalNotification<HiveClientCacheKey, CacheableHiveMetaStoreClient> notification) {
-                     CacheableHiveMetaStoreClient hiveMetaStoreClient = notification.getValue();
-                     if (hiveMetaStoreClient != null) {
-                         synchronized (CACHE_TEARDOWN_LOCK) {
+    public HiveClientCache(final int timeout) {
+        this.timeout = timeout;
+        RemovalListener<HiveClientCacheKey, CacheableHiveMetaStoreClient> removalListener =
+            new RemovalListener<HiveClientCacheKey, CacheableHiveMetaStoreClient>() {
+                public void onRemoval(RemovalNotification<HiveClientCacheKey, CacheableHiveMetaStoreClient> notification) {
+                    CacheableHiveMetaStoreClient hiveMetaStoreClient = notification.getValue();
+                    if (hiveMetaStoreClient != null) {
+                        synchronized (CACHE_TEARDOWN_LOCK) {
                             hiveMetaStoreClient.setExpiredFromCache();
                             hiveMetaStoreClient.tearDownIfUnused();
-                         }
-                     }
-                 }
-             };
-         hiveCache = CacheBuilder.newBuilder()
-                 .expireAfterWrite(timeout, TimeUnit.SECONDS)
-                 .removalListener(removalListener)
-                 .build();
+                        }
+                    }
+                }
+            };
+        hiveCache = CacheBuilder.newBuilder()
+            .expireAfterWrite(timeout, TimeUnit.SECONDS)
+            .removalListener(removalListener)
+            .build();
 
-         // Add a shutdown hook for cleanup, if there are elements remaining in the cache which were not cleaned up.
-         // This is the best effort approach. Ignore any error while doing so. Notice that most of the clients
-         // would get cleaned up via either the removalListener or the close() call, only the active clients
-         // that are in the cache or expired but being used in other threads wont get cleaned. The following code will only
-         // clean the active cache ones. The ones expired from cache but being hold by other threads are in the mercy
-         // of finalize() being called.
-         Thread cleanupHiveClientShutdownThread = new Thread() {
-             @Override
-             public void run() {
-                 LOG.info("Cleaning up hive client cache in ShutDown hook");
-                 closeAllClientsQuietly();
-             }
-         };
-         Runtime.getRuntime().addShutdownHook(cleanupHiveClientShutdownThread);
+        // Add a shutdown hook to clean up any elements remaining in the cache that were not already cleaned up.
+        // This is a best-effort approach; any error while doing so is ignored. Most clients are cleaned up via
+        // either the removalListener or the close() call, so only the active clients still in the cache, or those
+        // that have expired but are still in use by other threads, won't get cleaned. The code below only cleans
+        // the active cached ones; the ones expired from the cache but still held by other threads are at the
+        // mercy of finalize() being called.
+        Thread cleanupHiveClientShutdownThread = new Thread() {
+            @Override
+            public void run() {
+                LOG.info("Cleaning up hive client cache in ShutDown hook");
+                closeAllClientsQuietly();
+            }
+        };
+        Runtime.getRuntime().addShutdownHook(cleanupHiveClientShutdownThread);
     }
 
     /**
@@ -215,36 +216,36 @@
             if (o == null || getClass() != o.getClass()) return false;
             HiveClientCacheKey that = (HiveClientCacheKey) o;
             return new EqualsBuilder().
-                    append(this.metaStoreURIs,
-                            that.metaStoreURIs).
-                    append(this.ugi, that.ugi).
-                    append(this.threadId, that.threadId).isEquals();
+                append(this.metaStoreURIs,
+                    that.metaStoreURIs).
+                append(this.ugi, that.ugi).
+                append(this.threadId, that.threadId).isEquals();
         }
 
         @Override
         public int hashCode() {
             return new HashCodeBuilder().
-                    append(metaStoreURIs).
-                    append(ugi).
-                    append(threadId).toHashCode();
+                append(metaStoreURIs).
+                append(ugi).
+                append(threadId).toHashCode();
         }
     }
 
     /**
      * Add # of current users on HiveMetaStoreClient, so that the client can be cleaned when no one is using it.
      */
-    public static class CacheableHiveMetaStoreClient extends HiveMetaStoreClient  {
-        private  AtomicInteger users = new AtomicInteger(0);
+    public static class CacheableHiveMetaStoreClient extends HiveMetaStoreClient {
+        private AtomicInteger users = new AtomicInteger(0);
         private volatile boolean expiredFromCache = false;
         private boolean isClosed = false;
         private final long expiryTime;
-        private static final int EXPIRY_TIME_EXTENSION_IN_MILLIS = 60*1000;
+        private static final int EXPIRY_TIME_EXTENSION_IN_MILLIS = 60 * 1000;
 
         public CacheableHiveMetaStoreClient(final HiveConf conf, final int timeout) throws MetaException {
             super(conf);
             // Extend the expiry time with some extra time on top of guava expiry time to make sure
             // that items closed() are for sure expired and would never be returned by guava.
-            this.expiryTime = System.currentTimeMillis() + timeout*1000 + EXPIRY_TIME_EXTENSION_IN_MILLIS;
+            this.expiryTime = System.currentTimeMillis() + timeout * 1000 + EXPIRY_TIME_EXTENSION_IN_MILLIS;
         }
 
         private void acquire() {
@@ -287,9 +288,9 @@
          * This *MUST* be called by anyone who uses this client.
          */
         @Override
-        public void close(){
+        public void close() {
             release();
-            if(System.currentTimeMillis() >= expiryTime)
+            if (System.currentTimeMillis() >= expiryTime)
                 setExpiredFromCache();
             tearDownIfUnused();
         }
@@ -300,7 +301,7 @@
          *  2. It has expired from the cache
          */
         private void tearDownIfUnused() {
-            if(users.get() == 0 && expiredFromCache) {
+            if (users.get() == 0 && expiredFromCache) {
                 this.tearDown();
             }
         }
@@ -310,11 +311,11 @@
          */
         protected synchronized void tearDown() {
             try {
-                if(!isClosed) {
+                if (!isClosed) {
                     super.close();
                 }
                 isClosed = true;
-            } catch(Exception e) {
+            } catch (Exception e) {
                 LOG.warn("Error closing hive metastore client. Ignored.", e);
             }
         }
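
A sketch of the intended client lifecycle (the wrapper class is illustrative, and getAllDatabases() stands in for any metastore call): callers obtain a cached metastore client through HCatUtil and must call close() so the cache can eventually tear it down.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hcatalog.common.HCatUtil;

public class CachedClientExample {
    public static void listDatabases() throws Exception {
        HiveConf hiveConf = HCatUtil.getHiveConf(new Configuration());
        HiveMetaStoreClient client = HCatUtil.getHiveClient(hiveConf);
        try {
            // Representative metastore call made through the cached client.
            System.out.println(client.getAllDatabases());
        } finally {
            // Required: releases the cached client; it is only torn down
            // once it is both unused and expired from the cache.
            client.close();
        }
    }
}
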
diff --git a/src/java/org/apache/hcatalog/data/DataType.java b/src/java/org/apache/hcatalog/data/DataType.java
index a0a0739..687a67b 100644
--- a/src/java/org/apache/hcatalog/data/DataType.java
+++ b/src/java/org/apache/hcatalog/data/DataType.java
@@ -27,179 +27,180 @@
 
 public abstract class DataType {
 
-  public static final byte NULL      =   1;
-  public static final byte BOOLEAN   =   5;
-  public static final byte BYTE      =   6;
-  public static final byte INTEGER   =  10;
-  public static final byte SHORT     =  11;
-  public static final byte LONG      =  15;
-  public static final byte FLOAT     =  20;
-  public static final byte DOUBLE    =  25;
-  public static final byte STRING    =  55;
-  public static final byte BINARY    =  60;
+    public static final byte NULL = 1;
+    public static final byte BOOLEAN = 5;
+    public static final byte BYTE = 6;
+    public static final byte INTEGER = 10;
+    public static final byte SHORT = 11;
+    public static final byte LONG = 15;
+    public static final byte FLOAT = 20;
+    public static final byte DOUBLE = 25;
+    public static final byte STRING = 55;
+    public static final byte BINARY = 60;
 
-  public static final byte MAP       = 100;
-  public static final byte STRUCT    = 110;
-  public static final byte LIST      = 120;
-  public static final byte ERROR     =  -1;
+    public static final byte MAP = 100;
+    public static final byte STRUCT = 110;
+    public static final byte LIST = 120;
+    public static final byte ERROR = -1;
 
-  /**
-   * Determine the datatype of an object.
-   * @param o Object to test.
-   * @return byte code of the type, or ERROR if we don't know.
-   */
-  public static byte findType(Object o) {
-    if (o == null) {
-      return NULL;
-    }
-
-    Class<?> clazz = o.getClass();
-
-    // Try to put the most common first
-    if (clazz == String.class) {
-      return STRING;
-    } else if (clazz == Integer.class) {
-      return INTEGER;
-    } else if (clazz == Long.class) {
-      return LONG;
-    } else if (clazz == Float.class) {
-      return FLOAT;
-    } else if (clazz == Double.class) {
-      return DOUBLE;
-    } else if (clazz == Boolean.class) {
-      return BOOLEAN;
-    } else if (clazz == Byte.class) {
-      return BYTE;
-    } else if (clazz == Short.class) {
-      return SHORT;
-    } else if (o instanceof List<?>) {
-      return LIST;
-    } else if (o instanceof Map<?,?>) {
-      return MAP;
-    }else if (o instanceof byte[]) {
-        return BINARY;
-    } else {return ERROR;}
-  }
-
-  public static int compare(Object o1, Object o2) {
-
-    return compare(o1, o2, findType(o1),findType(o2));
-  }
-
-  public static int compare(Object o1, Object o2, byte dt1, byte dt2) {
-    if (dt1 == dt2) {
-      switch (dt1) {
-      case NULL:
-        return 0;
-
-      case BOOLEAN:
-        return ((Boolean)o1).compareTo((Boolean)o2);
-
-      case BYTE:
-        return ((Byte)o1).compareTo((Byte)o2);
-
-      case INTEGER:
-        return ((Integer)o1).compareTo((Integer)o2);
-
-      case LONG:
-        return ((Long)o1).compareTo((Long)o2);
-
-      case FLOAT:
-        return ((Float)o1).compareTo((Float)o2);
-
-      case DOUBLE:
-        return ((Double)o1).compareTo((Double)o2);
-
-      case STRING:
-        return ((String)o1).compareTo((String)o2);
-
-      case SHORT:
-        return ((Short)o1).compareTo((Short)o2);
-        
-      case BINARY:
-        return compareByteArray((byte[])o1, (byte[])o2);
-
-      case LIST:
-        List<?> l1 = (List<?>)o1;
-        List<?> l2 = (List<?>)o2;
-        int len = l1.size();
-        if(len != l2.size()) {
-          return len - l2.size();
-        } else{
-          for(int i =0; i < len; i++){
-            int cmpVal = compare(l1.get(i), l2.get(i));
-            if(cmpVal != 0) {
-              return cmpVal;
-            }
-          }
-          return 0;
+    /**
+     * Determine the datatype of an object.
+     * @param o Object to test.
+     * @return byte code of the type, or ERROR if we don't know.
+     */
+    public static byte findType(Object o) {
+        if (o == null) {
+            return NULL;
         }
 
-      case MAP: {
-        Map<?,?> m1 = (Map<?,?>)o1;
-        Map<?,?> m2 = (Map<?,?>)o2;
-        int sz1 = m1.size();
-        int sz2 = m2.size();
-        if (sz1 < sz2) {
-          return -1;
-        } else if (sz1 > sz2) {
-          return 1;
+        Class<?> clazz = o.getClass();
+
+        // Try to put the most common first
+        if (clazz == String.class) {
+            return STRING;
+        } else if (clazz == Integer.class) {
+            return INTEGER;
+        } else if (clazz == Long.class) {
+            return LONG;
+        } else if (clazz == Float.class) {
+            return FLOAT;
+        } else if (clazz == Double.class) {
+            return DOUBLE;
+        } else if (clazz == Boolean.class) {
+            return BOOLEAN;
+        } else if (clazz == Byte.class) {
+            return BYTE;
+        } else if (clazz == Short.class) {
+            return SHORT;
+        } else if (o instanceof List<?>) {
+            return LIST;
+        } else if (o instanceof Map<?, ?>) {
+            return MAP;
+        } else if (o instanceof byte[]) {
+            return BINARY;
         } else {
-          // This is bad, but we have to sort the keys of the maps in order
-          // to be commutative.
-          TreeMap<Object,Object> tm1 = new TreeMap<Object,Object>(m1);
-          TreeMap<Object, Object> tm2 = new TreeMap<Object,Object>(m2);
-          Iterator<Entry<Object, Object>> i1 = tm1.entrySet().iterator();
-          Iterator<Entry<Object, Object> > i2 = tm2.entrySet().iterator();
-          while (i1.hasNext()) {
-            Map.Entry<Object, Object> entry1 = i1.next();
-            Map.Entry<Object, Object> entry2 = i2.next();
-            int c = compare(entry1.getValue(), entry2.getValue());
-            if (c != 0) {
-              return c;
-            } else {
-              c = compare(entry1.getValue(), entry2.getValue());
-              if (c != 0) {
-                return c;
-              }
-            }
-          }
-          return 0;
+            return ERROR;
         }
-      }
-
-      default:
-        throw new RuntimeException("Unkown type " + dt1 +
-        " in compare");
-      }
-    } else {
-      return dt1 < dt2 ? -1 : 1;
-    }
-  }
-
-  private static int compareByteArray(byte[] o1, byte[] o2) {
-    
-    for(int i = 0; i < o1.length; i++){
-      if(i == o2.length){
-        return 1;
-      }
-      if(o1[i] == o2[i]){
-        continue;
-      }
-      if(o1[i] > o1[i]){
-        return 1;
-      }
-      else{
-        return -1;
-      }
     }
 
-    //bytes in o1 are same as o2
-    //in case o2 was longer
-    if(o2.length > o1.length){
-      return -1;
+    public static int compare(Object o1, Object o2) {
+
+        return compare(o1, o2, findType(o1), findType(o2));
     }
-    return 0; //equals
-  }
+
+    public static int compare(Object o1, Object o2, byte dt1, byte dt2) {
+        if (dt1 == dt2) {
+            switch (dt1) {
+            case NULL:
+                return 0;
+
+            case BOOLEAN:
+                return ((Boolean) o1).compareTo((Boolean) o2);
+
+            case BYTE:
+                return ((Byte) o1).compareTo((Byte) o2);
+
+            case INTEGER:
+                return ((Integer) o1).compareTo((Integer) o2);
+
+            case LONG:
+                return ((Long) o1).compareTo((Long) o2);
+
+            case FLOAT:
+                return ((Float) o1).compareTo((Float) o2);
+
+            case DOUBLE:
+                return ((Double) o1).compareTo((Double) o2);
+
+            case STRING:
+                return ((String) o1).compareTo((String) o2);
+
+            case SHORT:
+                return ((Short) o1).compareTo((Short) o2);
+
+            case BINARY:
+                return compareByteArray((byte[]) o1, (byte[]) o2);
+
+            case LIST:
+                List<?> l1 = (List<?>) o1;
+                List<?> l2 = (List<?>) o2;
+                int len = l1.size();
+                if (len != l2.size()) {
+                    return len - l2.size();
+                } else {
+                    for (int i = 0; i < len; i++) {
+                        int cmpVal = compare(l1.get(i), l2.get(i));
+                        if (cmpVal != 0) {
+                            return cmpVal;
+                        }
+                    }
+                    return 0;
+                }
+
+            case MAP: {
+                Map<?, ?> m1 = (Map<?, ?>) o1;
+                Map<?, ?> m2 = (Map<?, ?>) o2;
+                int sz1 = m1.size();
+                int sz2 = m2.size();
+                if (sz1 < sz2) {
+                    return -1;
+                } else if (sz1 > sz2) {
+                    return 1;
+                } else {
+                    // This is bad, but we have to sort the keys of the maps in order
+                    // to be commutative.
+                    TreeMap<Object, Object> tm1 = new TreeMap<Object, Object>(m1);
+                    TreeMap<Object, Object> tm2 = new TreeMap<Object, Object>(m2);
+                    Iterator<Entry<Object, Object>> i1 = tm1.entrySet().iterator();
+                    Iterator<Entry<Object, Object>> i2 = tm2.entrySet().iterator();
+                    while (i1.hasNext()) {
+                        Map.Entry<Object, Object> entry1 = i1.next();
+                        Map.Entry<Object, Object> entry2 = i2.next();
+                        int c = compare(entry1.getKey(), entry2.getKey());
+                        if (c != 0) {
+                            return c;
+                        } else {
+                            c = compare(entry1.getValue(), entry2.getValue());
+                            if (c != 0) {
+                                return c;
+                            }
+                        }
+                    }
+                    return 0;
+                }
+            }
+
+            default:
+                throw new RuntimeException("Unkown type " + dt1 +
+                    " in compare");
+            }
+        } else {
+            return dt1 < dt2 ? -1 : 1;
+        }
+    }
+
+    private static int compareByteArray(byte[] o1, byte[] o2) {
+
+        for (int i = 0; i < o1.length; i++) {
+            if (i == o2.length) {
+                return 1;
+            }
+            if (o1[i] == o2[i]) {
+                continue;
+            }
+            if (o1[i] > o2[i]) {
+                return 1;
+            } else {
+                return -1;
+            }
+        }
+
+        //bytes in o1 are same as o2
+        //in case o2 was longer
+        if (o2.length > o1.length) {
+            return -1;
+        }
+        return 0; //equals
+    }
 
 }
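
For context on the class reindented above, a minimal usage sketch of DataType.findType() and DataType.compare() follows. It is illustrative only and not part of this change; the class name DataTypeExample and the sample values are assumptions.

    import java.util.Arrays;
    import org.apache.hcatalog.data.DataType;

    // Illustrative sketch (not part of this change): exercises the type-code
    // lookup and the comparison helper shown in DataType above.
    public class DataTypeExample {
        public static void main(String[] args) {
            // findType() maps a Java object to one of the byte codes above.
            System.out.println(DataType.findType("hello") == DataType.STRING);           // true
            System.out.println(DataType.findType(42) == DataType.INTEGER);               // true
            System.out.println(DataType.findType(Arrays.asList(1, 2)) == DataType.LIST); // true

            // Same-type values compare via their natural ordering ...
            System.out.println(DataType.compare(1, 2) < 0);    // true
            // ... while values of different types are ordered by their type codes
            // (STRING is 55, INTEGER is 10, so the string sorts after the int).
            System.out.println(DataType.compare("a", 1) > 0);  // true
        }
    }
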
diff --git a/src/java/org/apache/hcatalog/data/DefaultHCatRecord.java b/src/java/org/apache/hcatalog/data/DefaultHCatRecord.java
index 25af53d..d8b30ee 100644
--- a/src/java/org/apache/hcatalog/data/DefaultHCatRecord.java
+++ b/src/java/org/apache/hcatalog/data/DefaultHCatRecord.java
@@ -31,20 +31,20 @@
 
     private List<Object> contents;
 
-    public DefaultHCatRecord(){
+    public DefaultHCatRecord() {
         contents = new ArrayList<Object>();
     }
 
-    public DefaultHCatRecord(int size){
+    public DefaultHCatRecord(int size) {
         contents = new ArrayList<Object>(size);
-        for(int i=0; i < size; i++){
+        for (int i = 0; i < size; i++) {
             contents.add(null);
         }
     }
 
     @Override
     public void remove(int idx) throws HCatException {
-      contents.remove(idx);
+        contents.remove(idx);
     }
 
     public DefaultHCatRecord(List<Object> list) {
@@ -76,7 +76,7 @@
 
         contents.clear();
         int len = in.readInt();
-        for(int i =0; i < len; i++){
+        for (int i = 0; i < len; i++) {
             contents.add(ReaderWriter.readDatum(in));
         }
     }
@@ -106,8 +106,8 @@
     public String toString() {
 
         StringBuilder sb = new StringBuilder();
-        for(Object o : contents) {
-            sb.append(o+"\t");
+        for (Object o : contents) {
+            sb.append(o + "\t");
         }
         return sb.toString();
     }
@@ -119,12 +119,12 @@
 
     @Override
     public void set(String fieldName, HCatSchema recordSchema, Object value) throws HCatException {
-        set(recordSchema.getPosition(fieldName),value);
+        set(recordSchema.getPosition(fieldName), value);
     }
 
     @Override
     public void copy(HCatRecord r) throws HCatException {
-      this.contents = r.getAll();
+        this.contents = r.getAll();
     }
 
 }
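
A minimal sketch of positional access with the DefaultHCatRecord reindented above (illustrative only; the class name and sample values are assumptions, not part of this change):

    import java.util.Arrays;
    import org.apache.hcatalog.data.DefaultHCatRecord;
    import org.apache.hcatalog.data.HCatRecord;

    // Illustrative sketch (not part of this change): builds a three-field
    // record and reads it back by position.
    public class DefaultHCatRecordExample {
        public static void main(String[] args) {
            HCatRecord r = new DefaultHCatRecord(3);      // pre-sized, filled with nulls
            r.set(0, 100);
            r.set(1, "hello");
            r.set(2, Arrays.asList(1.0, 2.0));

            System.out.println(r.get(0));                 // 100
            System.out.println(r.size());                 // 3
            System.out.println(r);                        // tab-separated via toString()
        }
    }
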
diff --git a/src/java/org/apache/hcatalog/data/HCatRecord.java b/src/java/org/apache/hcatalog/data/HCatRecord.java
index 5735b05..131d8d0 100644
--- a/src/java/org/apache/hcatalog/data/HCatRecord.java
+++ b/src/java/org/apache/hcatalog/data/HCatRecord.java
@@ -33,13 +33,16 @@
 public abstract class HCatRecord implements HCatRecordable {
 
     public abstract Object get(String fieldName, HCatSchema recordSchema) throws HCatException;
-    public abstract void set(String fieldName, HCatSchema recordSchema, Object value ) throws HCatException;
+
+    public abstract void set(String fieldName, HCatSchema recordSchema, Object value) throws HCatException;
+
     public abstract void remove(int idx) throws HCatException;
+
     public abstract void copy(HCatRecord r) throws HCatException;
 
-    protected Object get(String fieldName, HCatSchema recordSchema, Class clazz) throws HCatException{
+    protected Object get(String fieldName, HCatSchema recordSchema, Class clazz) throws HCatException {
         // TODO : if needed, verify that recordschema entry for fieldname matches appropriate type.
-        return get(fieldName,recordSchema);
+        return get(fieldName, recordSchema);
     }
 
     public Boolean getBoolean(String fieldName, HCatSchema recordSchema) throws HCatException {
@@ -47,15 +50,15 @@
     }
 
     public void setBoolean(String fieldName, HCatSchema recordSchema, Boolean value) throws HCatException {
-        set(fieldName,recordSchema,value);
+        set(fieldName, recordSchema, value);
     }
-    
+
     public byte[] getByteArray(String fieldName, HCatSchema recordSchema) throws HCatException {
         return (byte[]) get(fieldName, recordSchema, byte[].class);
     }
 
     public void setByteArray(String fieldName, HCatSchema recordSchema, byte[] value) throws HCatException {
-        set(fieldName,recordSchema,value);
+        set(fieldName, recordSchema, value);
     }
 
     public Byte getByte(String fieldName, HCatSchema recordSchema) throws HCatException {
@@ -64,7 +67,7 @@
     }
 
     public void setByte(String fieldName, HCatSchema recordSchema, Byte value) throws HCatException {
-        set(fieldName,recordSchema,value);
+        set(fieldName, recordSchema, value);
     }
 
     public Short getShort(String fieldName, HCatSchema recordSchema) throws HCatException {
@@ -73,73 +76,73 @@
     }
 
     public void setShort(String fieldName, HCatSchema recordSchema, Short value) throws HCatException {
-        set(fieldName,recordSchema,value);
+        set(fieldName, recordSchema, value);
     }
 
     public Integer getInteger(String fieldName, HCatSchema recordSchema) throws HCatException {
-        return (Integer) get(fieldName,recordSchema, Integer.class);
+        return (Integer) get(fieldName, recordSchema, Integer.class);
     }
 
     public void setInteger(String fieldName, HCatSchema recordSchema, Integer value) throws HCatException {
-        set(fieldName,recordSchema,value);
+        set(fieldName, recordSchema, value);
     }
 
     public Long getLong(String fieldName, HCatSchema recordSchema) throws HCatException {
         // BIGINT
-        return (Long) get(fieldName,recordSchema,Long.class);
+        return (Long) get(fieldName, recordSchema, Long.class);
     }
 
     public void setLong(String fieldName, HCatSchema recordSchema, Long value) throws HCatException {
-        set(fieldName,recordSchema,value);
+        set(fieldName, recordSchema, value);
     }
 
     public Float getFloat(String fieldName, HCatSchema recordSchema) throws HCatException {
-        return (Float) get(fieldName,recordSchema,Float.class);
+        return (Float) get(fieldName, recordSchema, Float.class);
     }
 
     public void setFloat(String fieldName, HCatSchema recordSchema, Float value) throws HCatException {
-        set(fieldName,recordSchema,value);
+        set(fieldName, recordSchema, value);
     }
 
     public Double getDouble(String fieldName, HCatSchema recordSchema) throws HCatException {
-        return (Double) get(fieldName,recordSchema,Double.class);
+        return (Double) get(fieldName, recordSchema, Double.class);
     }
 
     public void setDouble(String fieldName, HCatSchema recordSchema, Double value) throws HCatException {
-        set(fieldName,recordSchema,value);
+        set(fieldName, recordSchema, value);
     }
 
     public String getString(String fieldName, HCatSchema recordSchema) throws HCatException {
-        return (String) get(fieldName,recordSchema,String.class);
+        return (String) get(fieldName, recordSchema, String.class);
     }
 
     public void setString(String fieldName, HCatSchema recordSchema, String value) throws HCatException {
-        set(fieldName,recordSchema,value);
+        set(fieldName, recordSchema, value);
     }
 
     @SuppressWarnings("unchecked")
     public List<? extends Object> getStruct(String fieldName, HCatSchema recordSchema) throws HCatException {
-        return (List<? extends Object>) get(fieldName,recordSchema,List.class);
+        return (List<? extends Object>) get(fieldName, recordSchema, List.class);
     }
 
     public void setStruct(String fieldName, HCatSchema recordSchema, List<? extends Object> value) throws HCatException {
-        set(fieldName,recordSchema,value);
+        set(fieldName, recordSchema, value);
     }
 
     public List<?> getList(String fieldName, HCatSchema recordSchema) throws HCatException {
-        return (List<?>) get(fieldName,recordSchema,List.class);
+        return (List<?>) get(fieldName, recordSchema, List.class);
     }
 
     public void setList(String fieldName, HCatSchema recordSchema, List<?> value) throws HCatException {
-        set(fieldName,recordSchema,value);
+        set(fieldName, recordSchema, value);
     }
 
-    public Map<?,?> getMap(String fieldName, HCatSchema recordSchema) throws HCatException {
-        return (Map<?,?>) get(fieldName,recordSchema,Map.class);
+    public Map<?, ?> getMap(String fieldName, HCatSchema recordSchema) throws HCatException {
+        return (Map<?, ?>) get(fieldName, recordSchema, Map.class);
     }
 
-    public void setMap(String fieldName, HCatSchema recordSchema, Map<?,?> value) throws HCatException {
-        set(fieldName,recordSchema,value);
+    public void setMap(String fieldName, HCatSchema recordSchema, Map<?, ?> value) throws HCatException {
+        set(fieldName, recordSchema, value);
     }
 
 }
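
The name-based getters and setters above resolve field positions through an HCatSchema. A minimal sketch follows; it is illustrative only, and the HCatSchema/HCatFieldSchema construction shown is an assumption about the companion schema classes, which this diff does not touch.

    import java.util.Arrays;
    import org.apache.hcatalog.data.DefaultHCatRecord;
    import org.apache.hcatalog.data.HCatRecord;
    import org.apache.hcatalog.data.schema.HCatFieldSchema;
    import org.apache.hcatalog.data.schema.HCatSchema;

    // Illustrative sketch (not part of this change): name-based access goes
    // through recordSchema.getPosition(fieldName), as in set()/get() above.
    public class HCatRecordByNameExample {
        public static void main(String[] args) throws Exception {
            HCatSchema schema = new HCatSchema(Arrays.asList(
                new HCatFieldSchema("id", HCatFieldSchema.Type.INT, null),
                new HCatFieldSchema("name", HCatFieldSchema.Type.STRING, null)));

            HCatRecord r = new DefaultHCatRecord(2);
            r.setInteger("id", schema, 7);
            r.setString("name", schema, "alice");

            System.out.println(r.getInteger("id", schema));   // 7
            System.out.println(r.getString("name", schema));  // alice
        }
    }
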
diff --git a/src/java/org/apache/hcatalog/data/HCatRecordObjectInspector.java b/src/java/org/apache/hcatalog/data/HCatRecordObjectInspector.java
index 060a66f..becadda 100644
--- a/src/java/org/apache/hcatalog/data/HCatRecordObjectInspector.java
+++ b/src/java/org/apache/hcatalog/data/HCatRecordObjectInspector.java
@@ -25,28 +25,28 @@
 
 public class HCatRecordObjectInspector extends StandardStructObjectInspector {
 
-  protected HCatRecordObjectInspector(List<String> structFieldNames,
-      List<ObjectInspector> structFieldObjectInspectors) {
-    super(structFieldNames, structFieldObjectInspectors);
-  }
-
-  @Override
-  public Object getStructFieldData(Object data, StructField fieldRef) {
-    if (data == null){
-      return new IllegalArgumentException("Data passed in to get field from was null!");
+    protected HCatRecordObjectInspector(List<String> structFieldNames,
+                                        List<ObjectInspector> structFieldObjectInspectors) {
+        super(structFieldNames, structFieldObjectInspectors);
     }
 
-    int fieldID = ((MyField) fieldRef).getFieldID();
-    if (!(fieldID >= 0 && fieldID < fields.size())){
-      throw new IllegalArgumentException("Invalid field index ["+fieldID+"]");
+    @Override
+    public Object getStructFieldData(Object data, StructField fieldRef) {
+        if (data == null) {
+            throw new IllegalArgumentException("Data passed in to get field from was null!");
+        }
+
+        int fieldID = ((MyField) fieldRef).getFieldID();
+        if (!(fieldID >= 0 && fieldID < fields.size())) {
+            throw new IllegalArgumentException("Invalid field index [" + fieldID + "]");
+        }
+
+        return ((HCatRecord) data).get(fieldID);
     }
 
-    return ((HCatRecord) data).get(fieldID);
-  }
-
-  @Override
-  public List<Object> getStructFieldsDataAsList(Object o) {
-    return ((HCatRecord) o).getAll();
-  }
+    @Override
+    public List<Object> getStructFieldsDataAsList(Object o) {
+        return ((HCatRecord) o).getAll();
+    }
 
 }
diff --git a/src/java/org/apache/hcatalog/data/HCatRecordObjectInspectorFactory.java b/src/java/org/apache/hcatalog/data/HCatRecordObjectInspectorFactory.java
index 384d52f..de9f79d 100644
--- a/src/java/org/apache/hcatalog/data/HCatRecordObjectInspectorFactory.java
+++ b/src/java/org/apache/hcatalog/data/HCatRecordObjectInspectorFactory.java
@@ -38,94 +38,94 @@
  */
 public class HCatRecordObjectInspectorFactory {
 
-  private final static Logger LOG = LoggerFactory.getLogger(HCatRecordObjectInspectorFactory.class);
+    private final static Logger LOG = LoggerFactory.getLogger(HCatRecordObjectInspectorFactory.class);
 
-  static HashMap<TypeInfo, HCatRecordObjectInspector> cachedHCatRecordObjectInspectors =
-      new HashMap<TypeInfo, HCatRecordObjectInspector>();
-  static HashMap<TypeInfo, ObjectInspector> cachedObjectInspectors =
-      new HashMap<TypeInfo, ObjectInspector>();
+    static HashMap<TypeInfo, HCatRecordObjectInspector> cachedHCatRecordObjectInspectors =
+        new HashMap<TypeInfo, HCatRecordObjectInspector>();
+    static HashMap<TypeInfo, ObjectInspector> cachedObjectInspectors =
+        new HashMap<TypeInfo, ObjectInspector>();
 
-  /**
-   * Returns HCatRecordObjectInspector given a StructTypeInfo type definition for the record to look into
-   * @param typeInfo Type definition for the record to look into
-   * @return appropriate HCatRecordObjectInspector
-   * @throws SerDeException
-   */
-  public static HCatRecordObjectInspector getHCatRecordObjectInspector(
-      StructTypeInfo typeInfo) throws SerDeException {
-    HCatRecordObjectInspector oi = cachedHCatRecordObjectInspectors.get(typeInfo);
-    if (oi == null) {
+    /**
+     * Returns HCatRecordObjectInspector given a StructTypeInfo type definition for the record to look into
+     * @param typeInfo Type definition for the record to look into
+     * @return appropriate HCatRecordObjectInspector
+     * @throws SerDeException
+     */
+    public static HCatRecordObjectInspector getHCatRecordObjectInspector(
+        StructTypeInfo typeInfo) throws SerDeException {
+        HCatRecordObjectInspector oi = cachedHCatRecordObjectInspectors.get(typeInfo);
+        if (oi == null) {
 
-      LOG.debug("Got asked for OI for {} [{} ]",typeInfo.getCategory(),typeInfo.getTypeName());
-      switch (typeInfo.getCategory()) {
-      case STRUCT :
-        StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
-        List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
-        List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
-        List<ObjectInspector> fieldObjectInspectors = new ArrayList<ObjectInspector>(fieldTypeInfos.size());
-        for (int i = 0; i < fieldTypeInfos.size(); i++) {
-          fieldObjectInspectors.add(getStandardObjectInspectorFromTypeInfo(fieldTypeInfos.get(i)));
+            LOG.debug("Got asked for OI for {} [{} ]", typeInfo.getCategory(), typeInfo.getTypeName());
+            switch (typeInfo.getCategory()) {
+            case STRUCT:
+                StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
+                List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
+                List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
+                List<ObjectInspector> fieldObjectInspectors = new ArrayList<ObjectInspector>(fieldTypeInfos.size());
+                for (int i = 0; i < fieldTypeInfos.size(); i++) {
+                    fieldObjectInspectors.add(getStandardObjectInspectorFromTypeInfo(fieldTypeInfos.get(i)));
+                }
+                oi = new HCatRecordObjectInspector(fieldNames, fieldObjectInspectors);
+
+                break;
+            default:
+                // Hmm.. not good,
+                // the only type expected here is STRUCT, which maps to HCatRecord
+                // - anything else is an error. Return null as the inspector.
+                throw new SerDeException("TypeInfo [" + typeInfo.getTypeName()
+                    + "] was not of struct type - HCatRecord expected struct type, got ["
+                    + typeInfo.getCategory().toString() + "]");
+            }
+            cachedHCatRecordObjectInspectors.put(typeInfo, oi);
         }
-        oi = new HCatRecordObjectInspector(fieldNames,fieldObjectInspectors);
-
-        break;
-      default:
-        // Hmm.. not good,
-        // the only type expected here is STRUCT, which maps to HCatRecord
-        // - anything else is an error. Return null as the inspector.
-        throw new SerDeException("TypeInfo ["+typeInfo.getTypeName()
-            + "] was not of struct type - HCatRecord expected struct type, got ["
-            + typeInfo.getCategory().toString()+"]");
-      }
-      cachedHCatRecordObjectInspectors.put(typeInfo, oi);
+        return oi;
     }
-    return oi;
-  }
 
-  public static ObjectInspector getStandardObjectInspectorFromTypeInfo(TypeInfo typeInfo) {
+    public static ObjectInspector getStandardObjectInspectorFromTypeInfo(TypeInfo typeInfo) {
 
 
-    ObjectInspector oi = cachedObjectInspectors.get(typeInfo);
-    if (oi == null){
+        ObjectInspector oi = cachedObjectInspectors.get(typeInfo);
+        if (oi == null) {
 
-      LOG.debug("Got asked for OI for {}, [{}]",typeInfo.getCategory(), typeInfo.getTypeName());
-      switch (typeInfo.getCategory()) {
-      case PRIMITIVE:
-        oi = PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(
-            ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory());
-        break;
-      case STRUCT:
-        StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
-        List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
-        List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
-        List<ObjectInspector> fieldObjectInspectors =
-            new ArrayList<ObjectInspector>(fieldTypeInfos.size());
-        for (int i = 0; i < fieldTypeInfos.size(); i++) {
-          fieldObjectInspectors.add(getStandardObjectInspectorFromTypeInfo(fieldTypeInfos.get(i)));
+            LOG.debug("Got asked for OI for {}, [{}]", typeInfo.getCategory(), typeInfo.getTypeName());
+            switch (typeInfo.getCategory()) {
+            case PRIMITIVE:
+                oi = PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(
+                    ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory());
+                break;
+            case STRUCT:
+                StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
+                List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
+                List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
+                List<ObjectInspector> fieldObjectInspectors =
+                    new ArrayList<ObjectInspector>(fieldTypeInfos.size());
+                for (int i = 0; i < fieldTypeInfos.size(); i++) {
+                    fieldObjectInspectors.add(getStandardObjectInspectorFromTypeInfo(fieldTypeInfos.get(i)));
+                }
+                oi = ObjectInspectorFactory.getStandardStructObjectInspector(
+                    fieldNames, fieldObjectInspectors
+                );
+                break;
+            case LIST:
+                ObjectInspector elementObjectInspector = getStandardObjectInspectorFromTypeInfo(
+                    ((ListTypeInfo) typeInfo).getListElementTypeInfo());
+                oi = ObjectInspectorFactory.getStandardListObjectInspector(elementObjectInspector);
+                break;
+            case MAP:
+                ObjectInspector keyObjectInspector = getStandardObjectInspectorFromTypeInfo(
+                    ((MapTypeInfo) typeInfo).getMapKeyTypeInfo());
+                ObjectInspector valueObjectInspector = getStandardObjectInspectorFromTypeInfo(
+                    ((MapTypeInfo) typeInfo).getMapValueTypeInfo());
+                oi = ObjectInspectorFactory.getStandardMapObjectInspector(keyObjectInspector, valueObjectInspector);
+                break;
+            default:
+                oi = null;
+            }
+            cachedObjectInspectors.put(typeInfo, oi);
         }
-        oi = ObjectInspectorFactory.getStandardStructObjectInspector(
-            fieldNames, fieldObjectInspectors
-            );
-        break;
-      case LIST:
-        ObjectInspector elementObjectInspector = getStandardObjectInspectorFromTypeInfo(
-            ((ListTypeInfo)typeInfo).getListElementTypeInfo());
-        oi = ObjectInspectorFactory.getStandardListObjectInspector(elementObjectInspector);
-        break;
-      case MAP:
-        ObjectInspector keyObjectInspector = getStandardObjectInspectorFromTypeInfo(
-            ((MapTypeInfo)typeInfo).getMapKeyTypeInfo());
-        ObjectInspector valueObjectInspector = getStandardObjectInspectorFromTypeInfo(
-            ((MapTypeInfo)typeInfo).getMapValueTypeInfo());
-        oi = ObjectInspectorFactory.getStandardMapObjectInspector(keyObjectInspector,valueObjectInspector);
-        break;
-      default:
-        oi = null;
-      }
-      cachedObjectInspectors.put(typeInfo, oi);
+        return oi;
     }
-    return oi;
-  }
 
 
 }
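
A minimal sketch of the factory reindented above, showing how a Hive StructTypeInfo is turned into an object inspector that can read an HCatRecord. Illustrative only; the class name, the type string, and the sample record are assumptions.

    import java.util.Arrays;
    import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
    import org.apache.hcatalog.data.DefaultHCatRecord;
    import org.apache.hcatalog.data.HCatRecord;
    import org.apache.hcatalog.data.HCatRecordObjectInspector;
    import org.apache.hcatalog.data.HCatRecordObjectInspectorFactory;

    // Illustrative sketch (not part of this change): the factory caches one
    // inspector per TypeInfo and, for struct types, hands back an
    // HCatRecordObjectInspector that reads fields out of an HCatRecord.
    public class ObjectInspectorExample {
        public static void main(String[] args) throws Exception {
            StructTypeInfo typeInfo = (StructTypeInfo)
                TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,name:string>");
            HCatRecordObjectInspector oi =
                HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector(typeInfo);

            HCatRecord r = new DefaultHCatRecord(Arrays.<Object>asList(7, "alice"));
            Object name = oi.getStructFieldData(r, oi.getStructFieldRef("name"));
            System.out.println(name);   // alice
        }
    }
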
diff --git a/src/java/org/apache/hcatalog/data/HCatRecordSerDe.java b/src/java/org/apache/hcatalog/data/HCatRecordSerDe.java
index e827b27..ca204c3 100644
--- a/src/java/org/apache/hcatalog/data/HCatRecordSerDe.java
+++ b/src/java/org/apache/hcatalog/data/HCatRecordSerDe.java
@@ -54,254 +54,254 @@
 
     private static final Logger LOG = LoggerFactory.getLogger(HCatRecordSerDe.class);
 
-  public HCatRecordSerDe() throws SerDeException{
-  }
-
-  private List<String> columnNames;
-  private List<TypeInfo> columnTypes;
-  private StructTypeInfo rowTypeInfo;
-
-  private HCatRecordObjectInspector cachedObjectInspector;
-
-  @Override
-  public void initialize(Configuration conf, Properties tbl)
-      throws SerDeException {
-
-    LOG.debug("Initializing HCatRecordSerDe");
-    LOG.debug("props to serde: {}",tbl.entrySet());
-
-    // Get column names and types
-    String columnNameProperty = tbl.getProperty(Constants.LIST_COLUMNS);
-    String columnTypeProperty = tbl.getProperty(Constants.LIST_COLUMN_TYPES);
-
-    // all table column names
-    if (columnNameProperty.length() == 0) {
-      columnNames = new ArrayList<String>();
-    } else {
-      columnNames = Arrays.asList(columnNameProperty.split(","));
+    public HCatRecordSerDe() throws SerDeException {
     }
 
-    // all column types
-    if (columnTypeProperty.length() == 0) {
-      columnTypes = new ArrayList<TypeInfo>();
-    } else {
-      columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
+    private List<String> columnNames;
+    private List<TypeInfo> columnTypes;
+    private StructTypeInfo rowTypeInfo;
+
+    private HCatRecordObjectInspector cachedObjectInspector;
+
+    @Override
+    public void initialize(Configuration conf, Properties tbl)
+        throws SerDeException {
+
+        LOG.debug("Initializing HCatRecordSerDe");
+        LOG.debug("props to serde: {}", tbl.entrySet());
+
+        // Get column names and types
+        String columnNameProperty = tbl.getProperty(Constants.LIST_COLUMNS);
+        String columnTypeProperty = tbl.getProperty(Constants.LIST_COLUMN_TYPES);
+
+        // all table column names
+        if (columnNameProperty.length() == 0) {
+            columnNames = new ArrayList<String>();
+        } else {
+            columnNames = Arrays.asList(columnNameProperty.split(","));
+        }
+
+        // all column types
+        if (columnTypeProperty.length() == 0) {
+            columnTypes = new ArrayList<TypeInfo>();
+        } else {
+            columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
+        }
+
+
+        LOG.debug("columns: {} {}", columnNameProperty, columnNames);
+        LOG.debug("types: {} {}", columnTypeProperty, columnTypes);
+        assert (columnNames.size() == columnTypes.size());
+
+        rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(columnNames, columnTypes);
+        cachedObjectInspector = HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector(rowTypeInfo);
+    }
+
+    public void initialize(HCatSchema hsch) throws SerDeException {
+
+        LOG.debug("Initializing HCatRecordSerDe through HCatSchema {}.", hsch);
+
+        rowTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(hsch.getSchemaAsTypeString());
+        cachedObjectInspector = HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector(rowTypeInfo);
+
     }
 
 
-    LOG.debug("columns: {} {}",columnNameProperty,columnNames);
-    LOG.debug("types: {} {}", columnTypeProperty, columnTypes);
-    assert (columnNames.size() == columnTypes.size());
+    /**
+     * The purpose of a deserialize method is to turn a data blob
+     * which is a writable representation of the data into an
+     * object that can then be parsed using the appropriate
+     * ObjectInspector. In this case, since HCatRecord is directly
+     * already the Writable object, there's no extra work to be done
+     * here. Most of the logic resides in the ObjectInspector to be
+     * able to return values from within the HCatRecord to hive when
+     * it wants it.
+     */
+    @Override
+    public Object deserialize(Writable data) throws SerDeException {
+        if (!(data instanceof HCatRecord)) {
+            throw new SerDeException(getClass().getName() + ": expects HCatRecord!");
+        }
 
-    rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(columnNames, columnTypes);
-    cachedObjectInspector = HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector(rowTypeInfo);
-  }
-
-  public void initialize(HCatSchema hsch) throws SerDeException {
-
-      LOG.debug("Initializing HCatRecordSerDe through HCatSchema {}." ,hsch);
-
-    rowTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(hsch.getSchemaAsTypeString());
-    cachedObjectInspector = HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector(rowTypeInfo);
-
-  }
-
-
-  /**
-   * The purpose of a deserialize method is to turn a data blob
-   * which is a writable representation of the data into an
-   * object that can then be parsed using the appropriate
-   * ObjectInspector. In this case, since HCatRecord is directly
-   * already the Writable object, there's no extra work to be done
-   * here. Most of the logic resides in the ObjectInspector to be
-   * able to return values from within the HCatRecord to hive when
-   * it wants it.
-   */
-  @Override
-  public Object deserialize(Writable data) throws SerDeException {
-    if (!(data instanceof HCatRecord)) {
-      throw new SerDeException(getClass().getName() + ": expects HCatRecord!");
+        return (HCatRecord) data;
     }
 
-    return (HCatRecord) data;
-  }
-
-  /**
-   * The purpose of the serialize method is to turn an object-representation
-   * with a provided ObjectInspector into a Writable format, which
-   * the underlying layer can then use to write out.
-   *
-   * In this case, it means that Hive will call this method to convert
-   * an object with appropriate objectinspectors that it knows about,
-   * to write out a HCatRecord.
-   */
-  @Override
-  public Writable serialize(Object obj, ObjectInspector objInspector)
-      throws SerDeException {
-    if (objInspector.getCategory() != Category.STRUCT) {
-      throw new SerDeException(getClass().toString()
-          + " can only serialize struct types, but we got: "
-          + objInspector.getTypeName());
-    }
-    return new DefaultHCatRecord((List<Object>)serializeStruct(obj,(StructObjectInspector)objInspector));
-  }
-
-
-  /**
-   * Return serialized HCatRecord from an underlying
-   * object-representation, and readable by an ObjectInspector
-   * @param obj : Underlying object-representation
-   * @param soi : StructObjectInspector
-   * @return HCatRecord
-   */
-  private static List<?> serializeStruct(Object obj, StructObjectInspector soi)
-      throws SerDeException {
-
-    List<? extends StructField> fields = soi.getAllStructFieldRefs();
-    List<Object> list = soi.getStructFieldsDataAsList(obj);
-
-    if (list == null){
-      return null;
+    /**
+     * The purpose of the serialize method is to turn an object-representation
+     * with a provided ObjectInspector into a Writable format, which
+     * the underlying layer can then use to write out.
+     *
+     * In this case, it means that Hive will call this method to convert
+     * an object with appropriate objectinspectors that it knows about,
+     * to write out a HCatRecord.
+     */
+    @Override
+    public Writable serialize(Object obj, ObjectInspector objInspector)
+        throws SerDeException {
+        if (objInspector.getCategory() != Category.STRUCT) {
+            throw new SerDeException(getClass().toString()
+                + " can only serialize struct types, but we got: "
+                + objInspector.getTypeName());
+        }
+        return new DefaultHCatRecord((List<Object>) serializeStruct(obj, (StructObjectInspector) objInspector));
     }
 
-    List<Object> l = new ArrayList<Object>(fields.size());
 
-    if (fields != null){
-      for (int i = 0; i < fields.size(); i++) {
+    /**
+     * Return serialized HCatRecord from an underlying
+     * object-representation, and readable by an ObjectInspector
+     * @param obj : Underlying object-representation
+     * @param soi : StructObjectInspector
+     * @return HCatRecord
+     */
+    private static List<?> serializeStruct(Object obj, StructObjectInspector soi)
+        throws SerDeException {
 
-        // Get the field objectInspector and the field object.
-        ObjectInspector foi = fields.get(i).getFieldObjectInspector();
-        Object f = list.get(i);
-        Object res = serializeField(f, foi);
-        l.add(i, res);
-      }
-    }
-    return l;
-  }
+        List<? extends StructField> fields = soi.getAllStructFieldRefs();
+        List<Object> list = soi.getStructFieldsDataAsList(obj);
 
-  /**
-   * Return underlying Java Object from an object-representation
-   * that is readable by a provided ObjectInspector.
-   */
-  public static Object serializeField(Object field, ObjectInspector fieldObjectInspector)
-      throws SerDeException {
+        if (list == null) {
+            return null;
+        }
 
-    Object res;
-    if (fieldObjectInspector.getCategory() == Category.PRIMITIVE){
-      if (field != null && field instanceof Boolean &&
-          HCatContext.getInstance().getConf().getBoolean(
-              HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER,
-              HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT)) {
-        res = ((Boolean) field) ? 1 : 0;
-      } else if (field != null && field instanceof Short &&
-          HCatContext.getInstance().getConf().getBoolean(
-              HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION,
-              HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT)) {
-        res = new Integer((Short) field);
-      } else if (field != null && field instanceof Byte &&
-          HCatContext.getInstance().getConf().getBoolean(
-              HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION,
-              HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT)) {
-        res = new Integer((Byte) field);
-      } else {
-        res = ((PrimitiveObjectInspector) fieldObjectInspector).getPrimitiveJavaObject(field);
-      }
-    } else if (fieldObjectInspector.getCategory() == Category.STRUCT){
-      res = serializeStruct(field,(StructObjectInspector)fieldObjectInspector);
-    } else if (fieldObjectInspector.getCategory() == Category.LIST){
-      res = serializeList(field,(ListObjectInspector)fieldObjectInspector);
-    } else if (fieldObjectInspector.getCategory() == Category.MAP){
-      res = serializeMap(field,(MapObjectInspector)fieldObjectInspector);
-    } else {
-      throw new SerDeException(HCatRecordSerDe.class.toString()
-          + " does not know what to do with fields of unknown category: "
-          + fieldObjectInspector.getCategory() + " , type: " + fieldObjectInspector.getTypeName());
-    }
-    return res;
-  }
+        List<Object> l = new ArrayList<Object>(fields.size());
 
-  /**
-   * Helper method to return underlying Java Map from
-   * an object-representation that is readable by a provided
-   * MapObjectInspector
-   */
-  private static Map<?,?> serializeMap(Object f, MapObjectInspector moi) throws SerDeException {
-    ObjectInspector koi = moi.getMapKeyObjectInspector();
-    ObjectInspector voi = moi.getMapValueObjectInspector();
-    Map<Object,Object> m = new TreeMap<Object, Object>();
+        if (fields != null) {
+            for (int i = 0; i < fields.size(); i++) {
 
-    Map<?, ?> readMap = moi.getMap(f);
-    if (readMap == null) {
-      return null;
-    } else {
-      for (Map.Entry<?, ?> entry: readMap.entrySet()) {
-        m.put(serializeField(entry.getKey(),koi), serializeField(entry.getValue(),voi));
-      }
-    }
-    return m;
-  }
-
-  private static List<?> serializeList(Object f, ListObjectInspector loi) throws SerDeException {
-    List l = loi.getList(f);
-    if (l == null){
-      return null;
+                // Get the field objectInspector and the field object.
+                ObjectInspector foi = fields.get(i).getFieldObjectInspector();
+                Object f = list.get(i);
+                Object res = serializeField(f, foi);
+                l.add(i, res);
+            }
+        }
+        return l;
     }
 
-    ObjectInspector eloi = loi.getListElementObjectInspector();
-    if (eloi.getCategory() == Category.PRIMITIVE){
-      List<Object> list = new ArrayList<Object>(l.size());
-      for(int i = 0; i < l.size(); i++){
-        list.add(((PrimitiveObjectInspector)eloi).getPrimitiveJavaObject(l.get(i)));
-      }
-      return list;
-    } else if (eloi.getCategory() == Category.STRUCT){
-      List<List<?>> list = new ArrayList<List<?>>(l.size());
-      for (int i = 0 ; i < l.size() ; i++ ){
-        list.add(serializeStruct(l.get(i), (StructObjectInspector) eloi));
-      }
-      return list;
-    } else if (eloi.getCategory() == Category.LIST){
-      List<List<?>> list = new ArrayList<List<?>>(l.size());
-      for (int i = 0 ; i < l.size() ; i++ ){
-        list.add(serializeList(l.get(i), (ListObjectInspector) eloi));
-      }
-      return list;
-    } else if (eloi.getCategory() == Category.MAP){
-      List<Map<?,?>> list = new ArrayList<Map<?,?>>(l.size());
-      for (int i = 0 ; i < l.size() ; i++ ){
-        list.add(serializeMap(l.get(i), (MapObjectInspector) eloi));
-      }
-      return list;
-    } else {
-      throw new SerDeException(HCatRecordSerDe.class.toString()
-          + " does not know what to do with fields of unknown category: "
-          + eloi.getCategory() + " , type: " + eloi.getTypeName());
+    /**
+     * Return underlying Java Object from an object-representation
+     * that is readable by a provided ObjectInspector.
+     */
+    public static Object serializeField(Object field, ObjectInspector fieldObjectInspector)
+        throws SerDeException {
+
+        Object res;
+        if (fieldObjectInspector.getCategory() == Category.PRIMITIVE) {
+            if (field != null && field instanceof Boolean &&
+                HCatContext.getInstance().getConf().getBoolean(
+                    HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER,
+                    HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT)) {
+                res = ((Boolean) field) ? 1 : 0;
+            } else if (field != null && field instanceof Short &&
+                HCatContext.getInstance().getConf().getBoolean(
+                    HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION,
+                    HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT)) {
+                res = new Integer((Short) field);
+            } else if (field != null && field instanceof Byte &&
+                HCatContext.getInstance().getConf().getBoolean(
+                    HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION,
+                    HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT)) {
+                res = new Integer((Byte) field);
+            } else {
+                res = ((PrimitiveObjectInspector) fieldObjectInspector).getPrimitiveJavaObject(field);
+            }
+        } else if (fieldObjectInspector.getCategory() == Category.STRUCT) {
+            res = serializeStruct(field, (StructObjectInspector) fieldObjectInspector);
+        } else if (fieldObjectInspector.getCategory() == Category.LIST) {
+            res = serializeList(field, (ListObjectInspector) fieldObjectInspector);
+        } else if (fieldObjectInspector.getCategory() == Category.MAP) {
+            res = serializeMap(field, (MapObjectInspector) fieldObjectInspector);
+        } else {
+            throw new SerDeException(HCatRecordSerDe.class.toString()
+                + " does not know what to do with fields of unknown category: "
+                + fieldObjectInspector.getCategory() + " , type: " + fieldObjectInspector.getTypeName());
+        }
+        return res;
     }
-  }
+
+    /**
+     * Helper method to return underlying Java Map from
+     * an object-representation that is readable by a provided
+     * MapObjectInspector
+     */
+    private static Map<?, ?> serializeMap(Object f, MapObjectInspector moi) throws SerDeException {
+        ObjectInspector koi = moi.getMapKeyObjectInspector();
+        ObjectInspector voi = moi.getMapValueObjectInspector();
+        Map<Object, Object> m = new TreeMap<Object, Object>();
+
+        Map<?, ?> readMap = moi.getMap(f);
+        if (readMap == null) {
+            return null;
+        } else {
+            for (Map.Entry<?, ?> entry : readMap.entrySet()) {
+                m.put(serializeField(entry.getKey(), koi), serializeField(entry.getValue(), voi));
+            }
+        }
+        return m;
+    }
+
+    private static List<?> serializeList(Object f, ListObjectInspector loi) throws SerDeException {
+        List l = loi.getList(f);
+        if (l == null) {
+            return null;
+        }
+
+        ObjectInspector eloi = loi.getListElementObjectInspector();
+        if (eloi.getCategory() == Category.PRIMITIVE) {
+            List<Object> list = new ArrayList<Object>(l.size());
+            for (int i = 0; i < l.size(); i++) {
+                list.add(((PrimitiveObjectInspector) eloi).getPrimitiveJavaObject(l.get(i)));
+            }
+            return list;
+        } else if (eloi.getCategory() == Category.STRUCT) {
+            List<List<?>> list = new ArrayList<List<?>>(l.size());
+            for (int i = 0; i < l.size(); i++) {
+                list.add(serializeStruct(l.get(i), (StructObjectInspector) eloi));
+            }
+            return list;
+        } else if (eloi.getCategory() == Category.LIST) {
+            List<List<?>> list = new ArrayList<List<?>>(l.size());
+            for (int i = 0; i < l.size(); i++) {
+                list.add(serializeList(l.get(i), (ListObjectInspector) eloi));
+            }
+            return list;
+        } else if (eloi.getCategory() == Category.MAP) {
+            List<Map<?, ?>> list = new ArrayList<Map<?, ?>>(l.size());
+            for (int i = 0; i < l.size(); i++) {
+                list.add(serializeMap(l.get(i), (MapObjectInspector) eloi));
+            }
+            return list;
+        } else {
+            throw new SerDeException(HCatRecordSerDe.class.toString()
+                + " does not know what to do with fields of unknown category: "
+                + eloi.getCategory() + " , type: " + eloi.getTypeName());
+        }
+    }
 
 
-  /**
-   * Return an object inspector that can read through the object
-   * that we return from deserialize(). To wit, that means we need
-   * to return an ObjectInspector that can read HCatRecord, given
-   * the type info for it during initialize(). This also means
-   * that this method cannot and should not be called before initialize()
-   */
-  @Override
-  public ObjectInspector getObjectInspector() throws SerDeException {
-    return (ObjectInspector) cachedObjectInspector;
-  }
+    /**
+     * Return an object inspector that can read through the object
+     * that we return from deserialize(). To wit, that means we need
+     * to return an ObjectInspector that can read HCatRecord, given
+     * the type info for it during initialize(). This also means
+     * that this method cannot and should not be called before initialize()
+     */
+    @Override
+    public ObjectInspector getObjectInspector() throws SerDeException {
+        return (ObjectInspector) cachedObjectInspector;
+    }
 
-  @Override
-  public Class<? extends Writable> getSerializedClass() {
-    return HCatRecord.class;
-  }
+    @Override
+    public Class<? extends Writable> getSerializedClass() {
+        return HCatRecord.class;
+    }
 
-  @Override
-  public SerDeStats getSerDeStats() {
-    // no support for statistics yet
-    return null;
-  }
+    @Override
+    public SerDeStats getSerDeStats() {
+        // no support for statistics yet
+        return null;
+    }
 
 
 }
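
A minimal sketch of driving the SerDe reindented above from table properties. Illustrative only; the property keys are written as literals corresponding to Constants.LIST_COLUMNS / LIST_COLUMN_TYPES, and passing a null Configuration is an assumption based on initialize() not reading it in the code shown.

    import java.util.Arrays;
    import java.util.Properties;
    import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
    import org.apache.hcatalog.data.DefaultHCatRecord;
    import org.apache.hcatalog.data.HCatRecord;
    import org.apache.hcatalog.data.HCatRecordSerDe;

    // Illustrative sketch (not part of this change): deserialize() is a
    // pass-through because HCatRecord is already Writable; the object
    // inspector does the actual field extraction.
    public class HCatRecordSerDeExample {
        public static void main(String[] args) throws Exception {
            Properties tbl = new Properties();
            tbl.setProperty("columns", "id,name");           // Constants.LIST_COLUMNS
            tbl.setProperty("columns.types", "int,string");  // Constants.LIST_COLUMN_TYPES

            HCatRecordSerDe serde = new HCatRecordSerDe();
            serde.initialize(null, tbl);                     // conf is not read above

            HCatRecord r = new DefaultHCatRecord(Arrays.<Object>asList(7, "alice"));
            HCatRecord back = (HCatRecord) serde.deserialize(r);

            StructObjectInspector oi = (StructObjectInspector) serde.getObjectInspector();
            System.out.println(oi.getStructFieldData(back, oi.getStructFieldRef("name")));  // alice
        }
    }
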
diff --git a/src/java/org/apache/hcatalog/data/HCatRecordable.java b/src/java/org/apache/hcatalog/data/HCatRecordable.java
index 2d2c4fc..3656bb7 100644
--- a/src/java/org/apache/hcatalog/data/HCatRecordable.java
+++ b/src/java/org/apache/hcatalog/data/HCatRecordable.java
@@ -26,30 +26,30 @@
  */
 public interface HCatRecordable extends Writable {
 
-  /**
-   * Gets the field at the specified index.
-   * @param fieldNum the field number
-   * @return the object at the specified index
-   */
-  Object get(int fieldNum);
+    /**
+     * Gets the field at the specified index.
+     * @param fieldNum the field number
+     * @return the object at the specified index
+     */
+    Object get(int fieldNum);
 
-  /**
-   * Gets all the fields of the hcat record.
-   * @return the list of fields
-   */
-  List<Object> getAll();
+    /**
+     * Gets all the fields of the hcat record.
+     * @return the list of fields
+     */
+    List<Object> getAll();
 
-  /**
-   * Sets the field at the specified index.
-   * @param fieldNum the field number
-   * @param value the value to set
-   */
-  void set(int fieldNum, Object value);
+    /**
+     * Sets the field at the specified index.
+     * @param fieldNum the field number
+     * @param value the value to set
+     */
+    void set(int fieldNum, Object value);
 
-  /**
-   * Gets the size of the hcat record.
-   * @return the size
-   */
-  int size();
+    /**
+     * Gets the size of the hcat record.
+     * @return the size
+     */
+    int size();
 
 }
diff --git a/src/java/org/apache/hcatalog/data/JsonSerDe.java b/src/java/org/apache/hcatalog/data/JsonSerDe.java
index 29adc71..cac8e1a 100644
--- a/src/java/org/apache/hcatalog/data/JsonSerDe.java
+++ b/src/java/org/apache/hcatalog/data/JsonSerDe.java
@@ -72,503 +72,503 @@
 
 public class JsonSerDe implements SerDe {
 
-  private static final Logger LOG = LoggerFactory.getLogger(JsonSerDe.class);
-  private List<String> columnNames;
-  private List<TypeInfo> columnTypes;
+    private static final Logger LOG = LoggerFactory.getLogger(JsonSerDe.class);
+    private List<String> columnNames;
+    private List<TypeInfo> columnTypes;
 
-  private StructTypeInfo rowTypeInfo;
-  private HCatSchema schema;
+    private StructTypeInfo rowTypeInfo;
+    private HCatSchema schema;
 
-  private JsonFactory jsonFactory = null;
+    private JsonFactory jsonFactory = null;
 
-  private HCatRecordObjectInspector cachedObjectInspector;
+    private HCatRecordObjectInspector cachedObjectInspector;
 
-  @Override
-  public void initialize(Configuration conf, Properties tbl)
-      throws SerDeException {
+    @Override
+    public void initialize(Configuration conf, Properties tbl)
+        throws SerDeException {
 
 
-    LOG.debug("Initializing JsonSerDe");
-    LOG.debug("props to serde: {}",tbl.entrySet());
+        LOG.debug("Initializing JsonSerDe");
+        LOG.debug("props to serde: {}", tbl.entrySet());
 
 
-    // Get column names and types
-    String columnNameProperty = tbl.getProperty(Constants.LIST_COLUMNS);
-    String columnTypeProperty = tbl.getProperty(Constants.LIST_COLUMN_TYPES);
+        // Get column names and types
+        String columnNameProperty = tbl.getProperty(Constants.LIST_COLUMNS);
+        String columnTypeProperty = tbl.getProperty(Constants.LIST_COLUMN_TYPES);
 
-    // all table column names
-    if (columnNameProperty.length() == 0) {
-      columnNames = new ArrayList<String>();
-    } else {
-      columnNames = Arrays.asList(columnNameProperty.split(","));
-    }
-
-    // all column types
-    if (columnTypeProperty.length() == 0) {
-      columnTypes = new ArrayList<TypeInfo>();
-    } else {
-      columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
-    }
-
-    LOG.debug("columns: {}, {}" , columnNameProperty, columnNames);
-    LOG.debug("types: {}, {} ", columnTypeProperty, columnTypes);
-
-    assert (columnNames.size() == columnTypes.size());
-
-    rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(columnNames, columnTypes);
-
-    cachedObjectInspector = HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector(rowTypeInfo);
-    try {
-      schema = HCatSchemaUtils.getHCatSchema(rowTypeInfo).get(0).getStructSubSchema();
-      LOG.debug("schema : {}", schema);
-      LOG.debug("fields : {}", schema.getFieldNames());
-    } catch (HCatException e) {
-      throw new SerDeException(e);
-    }
-
-    jsonFactory = new JsonFactory();
-  }
-
-  /**
-   * Takes JSON string in Text form, and has to return an object representation above
-   * it that's readable by the corresponding object inspector.
-   *
-   * For this implementation, since we're using the jackson parser, we can construct
-   * our own object implementation, and we use HCatRecord for it
-   */
-  @Override
-  public Object deserialize(Writable blob) throws SerDeException {
-
-    Text t = (Text)blob;
-    JsonParser p;
-    List<Object> r = new ArrayList<Object>(Collections.nCopies(columnNames.size(), null));
-    try {
-      p = jsonFactory.createJsonParser(new ByteArrayInputStream((t.getBytes())));
-      if (p.nextToken() != JsonToken.START_OBJECT) {
-        throw new IOException("Start token not found where expected");
-      }
-      JsonToken token;
-      while( ((token = p.nextToken()) != JsonToken.END_OBJECT)&&(token != null)){
-        // iterate through each token, and create appropriate object here.
-        populateRecord(r,token,p,schema);
-      }
-    } catch (JsonParseException e) {
-      LOG.warn("Error [{}] parsing json text [{}].", e, t);
-      LOG.debug(null,e);
-      throw new SerDeException(e);
-    } catch (IOException e) {
-      LOG.warn("Error [{}] parsing json text [{}].", e, t);
-      LOG.debug(null,e);
-      throw new SerDeException(e);
-    }
-
-    return new DefaultHCatRecord(r);
-  }
-
-  private void populateRecord(List<Object> r, JsonToken token, JsonParser p, HCatSchema s) throws IOException {
-    if (token != JsonToken.FIELD_NAME){
-      throw new IOException("Field name expected");
-    }
-    String fieldName = p.getText();
-    int fpos;
-    try {
-      fpos = s.getPosition(fieldName);
-    } catch (NullPointerException npe){
-      fpos = getPositionFromHiveInternalColumnName(fieldName);
-      LOG.debug("NPE finding position for field [{}] in schema [{}]",fieldName,s);
-      if (!fieldName.equalsIgnoreCase(getHiveInternalColumnName(fpos))){
-        LOG.error("Hive internal column name {} and position "
-            +"encoding {} for the column name are at odds",fieldName,fpos);
-        throw npe;
-      }
-      if (fpos == -1){
-        return; // unknown field, we return.
-      }
-    }
-    HCatFieldSchema hcatFieldSchema = s.getFields().get(fpos);
-    Object currField = extractCurrentField(p, null, hcatFieldSchema,false);
-    r.set(fpos,currField);
-  }
-
-  public String getHiveInternalColumnName(int fpos) {
-    return HiveConf.getColumnInternalName(fpos);
-  }
-
-  public int getPositionFromHiveInternalColumnName(String internalName) {
-//    return HiveConf.getPositionFromInternalName(fieldName);
-    // The above line should have been all the implementation that
-    // we need, but due to a bug in that impl which recognizes
-    // only single-digit columns, we need another impl here.
-    Pattern internalPattern = Pattern.compile("_col([0-9]+)");
-    Matcher m = internalPattern.matcher(internalName);
-    if (!m.matches()){
-      return -1;
-    } else {
-      return Integer.parseInt(m.group(1));
-    }
-  }
-
-  /**
-   * Utility method to extract current expected field from given JsonParser
-   *
-   * To get the field, we need either a type or a hcatFieldSchema(necessary for complex types)
-   * It is possible that one of them can be null, and so, if so, the other is instantiated
-   * from the other
-   *
-   * isTokenCurrent is a boolean variable also passed in, which determines
-   * if the JsonParser is already at the token we expect to read next, or
-   * needs advancing to the next before we read.
-   */
-  private Object extractCurrentField(JsonParser p, Type t,
-      HCatFieldSchema hcatFieldSchema, boolean isTokenCurrent) throws IOException, JsonParseException,
-      HCatException {
-    Object val = null;
-    JsonToken valueToken;
-    if (isTokenCurrent){
-      valueToken = p.getCurrentToken();
-    } else {
-      valueToken = p.nextToken();
-    }
-
-    if (hcatFieldSchema != null){
-      t = hcatFieldSchema.getType();
-    }
-    switch(t) {
-    case INT:
-      val = (valueToken == JsonToken.VALUE_NULL)?null:p.getIntValue();
-      break;
-    case TINYINT:
-      val = (valueToken == JsonToken.VALUE_NULL)?null:p.getByteValue();
-      break;
-    case SMALLINT:
-      val = (valueToken == JsonToken.VALUE_NULL)?null:p.getShortValue();
-      break;
-    case BIGINT:
-      val = (valueToken == JsonToken.VALUE_NULL)?null:p.getLongValue();
-      break;
-    case BOOLEAN:
-      String bval = (valueToken == JsonToken.VALUE_NULL)?null:p.getText();
-      if (bval != null){
-        val = Boolean.valueOf(bval);
-      } else {
-        val = null;
-      }
-      break;
-    case FLOAT:
-      val = (valueToken == JsonToken.VALUE_NULL)?null:p.getFloatValue();
-      break;
-    case DOUBLE:
-      val = (valueToken == JsonToken.VALUE_NULL)?null:p.getDoubleValue();
-      break;
-    case STRING:
-      val = (valueToken == JsonToken.VALUE_NULL)?null:p.getText();
-      break;
-    case BINARY:
-      throw new IOException("JsonSerDe does not support BINARY type");
-    case ARRAY:
-      if (valueToken == JsonToken.VALUE_NULL){
-        val = null;
-        break;
-      }
-      if (valueToken != JsonToken.START_ARRAY){
-        throw new IOException("Start of Array expected");
-      }
-      List<Object> arr = new ArrayList<Object>();
-      while ((valueToken = p.nextToken()) != JsonToken.END_ARRAY) {
-        arr.add(extractCurrentField(p, null,hcatFieldSchema.getArrayElementSchema().get(0),true));
-      }
-      val = arr;
-      break;
-    case MAP:
-      if (valueToken == JsonToken.VALUE_NULL){
-        val = null;
-        break;
-      }
-      if (valueToken != JsonToken.START_OBJECT){
-        throw new IOException("Start of Object expected");
-      }
-      Map<Object,Object> map = new LinkedHashMap<Object,Object>();
-      Type keyType = hcatFieldSchema.getMapKeyType();
-      HCatFieldSchema valueSchema = hcatFieldSchema.getMapValueSchema().get(0);
-      while ((valueToken = p.nextToken()) != JsonToken.END_OBJECT) {
-        Object k = getObjectOfCorrespondingPrimitiveType(p.getCurrentName(),keyType);
-        Object v;
-        if (valueSchema.getType() == HCatFieldSchema.Type.STRUCT){
-          v = extractCurrentField(p,null, valueSchema,false);
+        // all table column names
+        if (columnNameProperty.length() == 0) {
+            columnNames = new ArrayList<String>();
         } else {
-          v = extractCurrentField(p,null, valueSchema,true);
+            columnNames = Arrays.asList(columnNameProperty.split(","));
         }
 
-        map.put(k, v);
-      }
-      val = map;
-      break;
-    case STRUCT:
-      if (valueToken == JsonToken.VALUE_NULL){
-        val = null;
-        break;
-      }
-      if (valueToken != JsonToken.START_OBJECT){
-        throw new IOException("Start of Object expected");
-      }
-      HCatSchema subSchema = hcatFieldSchema.getStructSubSchema();
-      int sz = subSchema.getFieldNames().size();
+        // all column types
+        if (columnTypeProperty.length() == 0) {
+            columnTypes = new ArrayList<TypeInfo>();
+        } else {
+            columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
+        }
 
-      List<Object> struct = new ArrayList<Object>(Collections.nCopies(sz, null));
-      while ((valueToken = p.nextToken()) != JsonToken.END_OBJECT) {
-        populateRecord(struct, valueToken, p, subSchema);
-      }
-      val = struct;
-      break;
+        LOG.debug("columns: {}, {}", columnNameProperty, columnNames);
+        LOG.debug("types: {}, {} ", columnTypeProperty, columnTypes);
+
+        assert (columnNames.size() == columnTypes.size());
+
+        rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(columnNames, columnTypes);
+
+        cachedObjectInspector = HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector(rowTypeInfo);
+        try {
+            schema = HCatSchemaUtils.getHCatSchema(rowTypeInfo).get(0).getStructSubSchema();
+            LOG.debug("schema : {}", schema);
+            LOG.debug("fields : {}", schema.getFieldNames());
+        } catch (HCatException e) {
+            throw new SerDeException(e);
+        }
+
+        jsonFactory = new JsonFactory();
     }
-    return val;
-  }
 
-  private Object getObjectOfCorrespondingPrimitiveType(String s, Type t) throws IOException {
-    switch(t) {
-    case INT:
-      return Integer.valueOf(s);
-    case TINYINT:
-      return Byte.valueOf(s);
-    case SMALLINT:
-      return Short.valueOf(s);
-    case BIGINT:
-      return Long.valueOf(s);
-    case BOOLEAN:
-      return (s.equalsIgnoreCase("true"));
-    case FLOAT:
-      return Float.valueOf(s);
-    case DOUBLE:
-      return Double.valueOf(s);
-    case STRING:
-      return s;
-    case BINARY:
-      throw new IOException("JsonSerDe does not support BINARY type");
+    /**
+     * Takes a JSON string in Text form and returns an object representation of it
+     * that is readable by the corresponding object inspector.
+     *
+     * For this implementation, since we use the Jackson parser, we can construct
+     * our own object representation, and we use HCatRecord for it.
+     */
+    @Override
+    public Object deserialize(Writable blob) throws SerDeException {
+
+        Text t = (Text) blob;
+        JsonParser p;
+        List<Object> r = new ArrayList<Object>(Collections.nCopies(columnNames.size(), null));
+        try {
+            p = jsonFactory.createJsonParser(new ByteArrayInputStream((t.getBytes())));
+            if (p.nextToken() != JsonToken.START_OBJECT) {
+                throw new IOException("Start token not found where expected");
+            }
+            JsonToken token;
+            while (((token = p.nextToken()) != JsonToken.END_OBJECT) && (token != null)) {
+                // iterate through each token, and create appropriate object here.
+                populateRecord(r, token, p, schema);
+            }
+        } catch (JsonParseException e) {
+            LOG.warn("Error [{}] parsing json text [{}].", e, t);
+            LOG.debug(null, e);
+            throw new SerDeException(e);
+        } catch (IOException e) {
+            LOG.warn("Error [{}] parsing json text [{}].", e, t);
+            LOG.debug(null, e);
+            throw new SerDeException(e);
+        }
+
+        return new DefaultHCatRecord(r);
     }
-    throw new IOException("Could not convert from string to map type "+t);
-  }
 
-  /**
-   * Given an object and object inspector pair, traverse the object
-   * and generate a Text representation of the object.
-   */
-  @Override
-  public Writable serialize(Object obj, ObjectInspector objInspector)
-      throws SerDeException {
-    StringBuilder sb = new StringBuilder();
-    try {
-
-      StructObjectInspector soi = (StructObjectInspector) objInspector;
-      List<? extends StructField> structFields = soi.getAllStructFieldRefs();
-      assert (columnNames.size() == structFields.size());
-      if (obj == null) {
-        sb.append("null");
-      } else {
-        sb.append(SerDeUtils.LBRACE);
-        for (int i = 0; i < structFields.size(); i++) {
-          if (i > 0) {
-            sb.append(SerDeUtils.COMMA);
-          }
-          sb.append(SerDeUtils.QUOTE);
-          sb.append(columnNames.get(i));
-          sb.append(SerDeUtils.QUOTE);
-          sb.append(SerDeUtils.COLON);
-          buildJSONString(sb, soi.getStructFieldData(obj, structFields.get(i)),
-              structFields.get(i).getFieldObjectInspector());
+    private void populateRecord(List<Object> r, JsonToken token, JsonParser p, HCatSchema s) throws IOException {
+        if (token != JsonToken.FIELD_NAME) {
+            throw new IOException("Field name expected");
         }
-        sb.append(SerDeUtils.RBRACE);
-      }
-
-    } catch (IOException e) {
-      LOG.warn("Error generating json text from object.", e);
-      throw new SerDeException(e);
+        String fieldName = p.getText();
+        int fpos;
+        try {
+            fpos = s.getPosition(fieldName);
+        } catch (NullPointerException npe) {
+            fpos = getPositionFromHiveInternalColumnName(fieldName);
+            LOG.debug("NPE finding position for field [{}] in schema [{}]", fieldName, s);
+            if (!fieldName.equalsIgnoreCase(getHiveInternalColumnName(fpos))) {
+                LOG.error("Hive internal column name {} and position "
+                    + "encoding {} for the column name are at odds", fieldName, fpos);
+                throw npe;
+            }
+            if (fpos == -1) {
+                return; // unknown field, we return.
+            }
+        }
+        HCatFieldSchema hcatFieldSchema = s.getFields().get(fpos);
+        Object currField = extractCurrentField(p, null, hcatFieldSchema, false);
+        r.set(fpos, currField);
     }
-    return new Text(sb.toString());
-  }
 
-  // TODO : code section copied over from SerDeUtils because of non-standard json production there
-  // should use quotes for all field names. We should fix this there, and then remove this copy.
-  // See http://jackson.codehaus.org/1.7.3/javadoc/org/codehaus/jackson/JsonParser.Feature.html#ALLOW_UNQUOTED_FIELD_NAMES
-  // for details - trying to enable Jackson to ignore that doesn't seem to work(compilation failure
-  // when attempting to use that feature, so having to change the production itself.
-  // Also, throws IOException when Binary is detected.
-  private static void buildJSONString(StringBuilder sb, Object o, ObjectInspector oi) throws IOException {
+    public String getHiveInternalColumnName(int fpos) {
+        return HiveConf.getColumnInternalName(fpos);
+    }
 
-    switch (oi.getCategory()) {
-    case PRIMITIVE: {
-      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
-      if (o == null) {
-        sb.append("null");
-      } else {
-        switch (poi.getPrimitiveCategory()) {
-        case BOOLEAN: {
-          boolean b = ((BooleanObjectInspector) poi).get(o);
-          sb.append(b ? "true" : "false");
-          break;
+    public int getPositionFromHiveInternalColumnName(String internalName) {
+//    return HiveConf.getPositionFromInternalName(fieldName);
+        // The above line should have been all the implementation that
+        // we need, but due to a bug in that impl which recognizes
+        // only single-digit columns, we need another impl here.
+        Pattern internalPattern = Pattern.compile("_col([0-9]+)");
+        Matcher m = internalPattern.matcher(internalName);
+        if (!m.matches()) {
+            return -1;
+        } else {
+            return Integer.parseInt(m.group(1));
         }
-        case BYTE: {
-          sb.append(((ByteObjectInspector) poi).get(o));
-          break;
+    }
+
+    /**
+     * Utility method to extract the current expected field from the given JsonParser.
+     *
+     * To extract the field we need either a type or an hcatFieldSchema (the latter is
+     * necessary for complex types); if one of them is null, it is derived from the
+     * other.
+     *
+     * isTokenCurrent is a boolean flag indicating whether the JsonParser is already
+     * positioned at the token we expect to read next, or whether it needs to be
+     * advanced before we read.
+     */
+    private Object extractCurrentField(JsonParser p, Type t,
+                                       HCatFieldSchema hcatFieldSchema, boolean isTokenCurrent) throws IOException, JsonParseException,
+        HCatException {
+        Object val = null;
+        JsonToken valueToken;
+        if (isTokenCurrent) {
+            valueToken = p.getCurrentToken();
+        } else {
+            valueToken = p.nextToken();
         }
-        case SHORT: {
-          sb.append(((ShortObjectInspector) poi).get(o));
-          break;
+
+        if (hcatFieldSchema != null) {
+            t = hcatFieldSchema.getType();
         }
-        case INT: {
-          sb.append(((IntObjectInspector) poi).get(o));
-          break;
+        switch (t) {
+        case INT:
+            val = (valueToken == JsonToken.VALUE_NULL) ? null : p.getIntValue();
+            break;
+        case TINYINT:
+            val = (valueToken == JsonToken.VALUE_NULL) ? null : p.getByteValue();
+            break;
+        case SMALLINT:
+            val = (valueToken == JsonToken.VALUE_NULL) ? null : p.getShortValue();
+            break;
+        case BIGINT:
+            val = (valueToken == JsonToken.VALUE_NULL) ? null : p.getLongValue();
+            break;
+        case BOOLEAN:
+            String bval = (valueToken == JsonToken.VALUE_NULL) ? null : p.getText();
+            if (bval != null) {
+                val = Boolean.valueOf(bval);
+            } else {
+                val = null;
+            }
+            break;
+        case FLOAT:
+            val = (valueToken == JsonToken.VALUE_NULL) ? null : p.getFloatValue();
+            break;
+        case DOUBLE:
+            val = (valueToken == JsonToken.VALUE_NULL) ? null : p.getDoubleValue();
+            break;
+        case STRING:
+            val = (valueToken == JsonToken.VALUE_NULL) ? null : p.getText();
+            break;
+        case BINARY:
+            throw new IOException("JsonSerDe does not support BINARY type");
+        case ARRAY:
+            if (valueToken == JsonToken.VALUE_NULL) {
+                val = null;
+                break;
+            }
+            if (valueToken != JsonToken.START_ARRAY) {
+                throw new IOException("Start of Array expected");
+            }
+            List<Object> arr = new ArrayList<Object>();
+            while ((valueToken = p.nextToken()) != JsonToken.END_ARRAY) {
+                arr.add(extractCurrentField(p, null, hcatFieldSchema.getArrayElementSchema().get(0), true));
+            }
+            val = arr;
+            break;
+        case MAP:
+            if (valueToken == JsonToken.VALUE_NULL) {
+                val = null;
+                break;
+            }
+            if (valueToken != JsonToken.START_OBJECT) {
+                throw new IOException("Start of Object expected");
+            }
+            Map<Object, Object> map = new LinkedHashMap<Object, Object>();
+            Type keyType = hcatFieldSchema.getMapKeyType();
+            HCatFieldSchema valueSchema = hcatFieldSchema.getMapValueSchema().get(0);
+            while ((valueToken = p.nextToken()) != JsonToken.END_OBJECT) {
+                Object k = getObjectOfCorrespondingPrimitiveType(p.getCurrentName(), keyType);
+                Object v;
+                if (valueSchema.getType() == HCatFieldSchema.Type.STRUCT) {
+                    v = extractCurrentField(p, null, valueSchema, false);
+                } else {
+                    v = extractCurrentField(p, null, valueSchema, true);
+                }
+
+                map.put(k, v);
+            }
+            val = map;
+            break;
+        case STRUCT:
+            if (valueToken == JsonToken.VALUE_NULL) {
+                val = null;
+                break;
+            }
+            if (valueToken != JsonToken.START_OBJECT) {
+                throw new IOException("Start of Object expected");
+            }
+            HCatSchema subSchema = hcatFieldSchema.getStructSubSchema();
+            int sz = subSchema.getFieldNames().size();
+
+            List<Object> struct = new ArrayList<Object>(Collections.nCopies(sz, null));
+            while ((valueToken = p.nextToken()) != JsonToken.END_OBJECT) {
+                populateRecord(struct, valueToken, p, subSchema);
+            }
+            val = struct;
+            break;
         }
-        case LONG: {
-          sb.append(((LongObjectInspector) poi).get(o));
-          break;
+        return val;
+    }
+
+    private Object getObjectOfCorrespondingPrimitiveType(String s, Type t) throws IOException {
+        switch (t) {
+        case INT:
+            return Integer.valueOf(s);
+        case TINYINT:
+            return Byte.valueOf(s);
+        case SMALLINT:
+            return Short.valueOf(s);
+        case BIGINT:
+            return Long.valueOf(s);
+        case BOOLEAN:
+            return (s.equalsIgnoreCase("true"));
+        case FLOAT:
+            return Float.valueOf(s);
+        case DOUBLE:
+            return Double.valueOf(s);
+        case STRING:
+            return s;
+        case BINARY:
+            throw new IOException("JsonSerDe does not support BINARY type");
         }
-        case FLOAT: {
-          sb.append(((FloatObjectInspector) poi).get(o));
-          break;
+        throw new IOException("Could not convert from string to map type " + t);
+    }
+
+    /**
+     * Given an object and object inspector pair, traverse the object
+     * and generate a Text representation of the object.
+     */
+    @Override
+    public Writable serialize(Object obj, ObjectInspector objInspector)
+        throws SerDeException {
+        StringBuilder sb = new StringBuilder();
+        try {
+
+            StructObjectInspector soi = (StructObjectInspector) objInspector;
+            List<? extends StructField> structFields = soi.getAllStructFieldRefs();
+            assert (columnNames.size() == structFields.size());
+            if (obj == null) {
+                sb.append("null");
+            } else {
+                sb.append(SerDeUtils.LBRACE);
+                for (int i = 0; i < structFields.size(); i++) {
+                    if (i > 0) {
+                        sb.append(SerDeUtils.COMMA);
+                    }
+                    sb.append(SerDeUtils.QUOTE);
+                    sb.append(columnNames.get(i));
+                    sb.append(SerDeUtils.QUOTE);
+                    sb.append(SerDeUtils.COLON);
+                    buildJSONString(sb, soi.getStructFieldData(obj, structFields.get(i)),
+                        structFields.get(i).getFieldObjectInspector());
+                }
+                sb.append(SerDeUtils.RBRACE);
+            }
+
+        } catch (IOException e) {
+            LOG.warn("Error generating json text from object.", e);
+            throw new SerDeException(e);
         }
-        case DOUBLE: {
-          sb.append(((DoubleObjectInspector) poi).get(o));
-          break;
+        return new Text(sb.toString());
+    }
+
+    // TODO : this section is copied over from SerDeUtils because the json production there is
+    // non-standard: it does not quote all field names. We should fix it there and then remove this copy.
+    // See http://jackson.codehaus.org/1.7.3/javadoc/org/codehaus/jackson/JsonParser.Feature.html#ALLOW_UNQUOTED_FIELD_NAMES
+    // for details - enabling the corresponding Jackson feature does not work (it fails to compile),
+    // so we have to change the production itself.
+    // Also, this copy throws IOException when a BINARY type is detected.
+    private static void buildJSONString(StringBuilder sb, Object o, ObjectInspector oi) throws IOException {
+
+        switch (oi.getCategory()) {
+        case PRIMITIVE: {
+            PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
+            if (o == null) {
+                sb.append("null");
+            } else {
+                switch (poi.getPrimitiveCategory()) {
+                case BOOLEAN: {
+                    boolean b = ((BooleanObjectInspector) poi).get(o);
+                    sb.append(b ? "true" : "false");
+                    break;
+                }
+                case BYTE: {
+                    sb.append(((ByteObjectInspector) poi).get(o));
+                    break;
+                }
+                case SHORT: {
+                    sb.append(((ShortObjectInspector) poi).get(o));
+                    break;
+                }
+                case INT: {
+                    sb.append(((IntObjectInspector) poi).get(o));
+                    break;
+                }
+                case LONG: {
+                    sb.append(((LongObjectInspector) poi).get(o));
+                    break;
+                }
+                case FLOAT: {
+                    sb.append(((FloatObjectInspector) poi).get(o));
+                    break;
+                }
+                case DOUBLE: {
+                    sb.append(((DoubleObjectInspector) poi).get(o));
+                    break;
+                }
+                case STRING: {
+                    sb.append('"');
+                    sb.append(SerDeUtils.escapeString(((StringObjectInspector) poi)
+                        .getPrimitiveJavaObject(o)));
+                    sb.append('"');
+                    break;
+                }
+                case TIMESTAMP: {
+                    sb.append('"');
+                    sb.append(((TimestampObjectInspector) poi)
+                        .getPrimitiveWritableObject(o));
+                    sb.append('"');
+                    break;
+                }
+                case BINARY: {
+                    throw new IOException("JsonSerDe does not support BINARY type");
+                }
+                default:
+                    throw new RuntimeException("Unknown primitive type: "
+                        + poi.getPrimitiveCategory());
+                }
+            }
+            break;
         }
-        case STRING: {
-          sb.append('"');
-          sb.append(SerDeUtils.escapeString(((StringObjectInspector) poi)
-              .getPrimitiveJavaObject(o)));
-          sb.append('"');
-          break;
+        case LIST: {
+            ListObjectInspector loi = (ListObjectInspector) oi;
+            ObjectInspector listElementObjectInspector = loi
+                .getListElementObjectInspector();
+            List<?> olist = loi.getList(o);
+            if (olist == null) {
+                sb.append("null");
+            } else {
+                sb.append(SerDeUtils.LBRACKET);
+                for (int i = 0; i < olist.size(); i++) {
+                    if (i > 0) {
+                        sb.append(SerDeUtils.COMMA);
+                    }
+                    buildJSONString(sb, olist.get(i), listElementObjectInspector);
+                }
+                sb.append(SerDeUtils.RBRACKET);
+            }
+            break;
         }
-        case TIMESTAMP: {
-          sb.append('"');
-          sb.append(((TimestampObjectInspector) poi)
-              .getPrimitiveWritableObject(o));
-          sb.append('"');
-          break;
+        case MAP: {
+            MapObjectInspector moi = (MapObjectInspector) oi;
+            ObjectInspector mapKeyObjectInspector = moi.getMapKeyObjectInspector();
+            ObjectInspector mapValueObjectInspector = moi
+                .getMapValueObjectInspector();
+            Map<?, ?> omap = moi.getMap(o);
+            if (omap == null) {
+                sb.append("null");
+            } else {
+                sb.append(SerDeUtils.LBRACE);
+                boolean first = true;
+                for (Object entry : omap.entrySet()) {
+                    if (first) {
+                        first = false;
+                    } else {
+                        sb.append(SerDeUtils.COMMA);
+                    }
+                    Map.Entry<?, ?> e = (Map.Entry<?, ?>) entry;
+                    StringBuilder keyBuilder = new StringBuilder();
+                    buildJSONString(keyBuilder, e.getKey(), mapKeyObjectInspector);
+                    String keyString = keyBuilder.toString().trim();
+                    boolean doQuoting = (!keyString.isEmpty()) && (keyString.charAt(0) != SerDeUtils.QUOTE);
+                    if (doQuoting) {
+                        sb.append(SerDeUtils.QUOTE);
+                    }
+                    sb.append(keyString);
+                    if (doQuoting) {
+                        sb.append(SerDeUtils.QUOTE);
+                    }
+                    sb.append(SerDeUtils.COLON);
+                    buildJSONString(sb, e.getValue(), mapValueObjectInspector);
+                }
+                sb.append(SerDeUtils.RBRACE);
+            }
+            break;
         }
-        case BINARY: {
-          throw new IOException("JsonSerDe does not support BINARY type");
+        case STRUCT: {
+            StructObjectInspector soi = (StructObjectInspector) oi;
+            List<? extends StructField> structFields = soi.getAllStructFieldRefs();
+            if (o == null) {
+                sb.append("null");
+            } else {
+                sb.append(SerDeUtils.LBRACE);
+                for (int i = 0; i < structFields.size(); i++) {
+                    if (i > 0) {
+                        sb.append(SerDeUtils.COMMA);
+                    }
+                    sb.append(SerDeUtils.QUOTE);
+                    sb.append(structFields.get(i).getFieldName());
+                    sb.append(SerDeUtils.QUOTE);
+                    sb.append(SerDeUtils.COLON);
+                    buildJSONString(sb, soi.getStructFieldData(o, structFields.get(i)),
+                        structFields.get(i).getFieldObjectInspector());
+                }
+                sb.append(SerDeUtils.RBRACE);
+            }
+            break;
+        }
+        case UNION: {
+            UnionObjectInspector uoi = (UnionObjectInspector) oi;
+            if (o == null) {
+                sb.append("null");
+            } else {
+                sb.append(SerDeUtils.LBRACE);
+                sb.append(uoi.getTag(o));
+                sb.append(SerDeUtils.COLON);
+                buildJSONString(sb, uoi.getField(o),
+                    uoi.getObjectInspectors().get(uoi.getTag(o)));
+                sb.append(SerDeUtils.RBRACE);
+            }
+            break;
         }
         default:
-          throw new RuntimeException("Unknown primitive type: "
-              + poi.getPrimitiveCategory());
+            throw new RuntimeException("Unknown type in ObjectInspector!");
         }
-      }
-      break;
     }
-    case LIST: {
-      ListObjectInspector loi = (ListObjectInspector) oi;
-      ObjectInspector listElementObjectInspector = loi
-          .getListElementObjectInspector();
-      List<?> olist = loi.getList(o);
-      if (olist == null) {
-        sb.append("null");
-      } else {
-        sb.append(SerDeUtils.LBRACKET);
-        for (int i = 0; i < olist.size(); i++) {
-          if (i > 0) {
-            sb.append(SerDeUtils.COMMA);
-          }
-          buildJSONString(sb, olist.get(i), listElementObjectInspector);
-        }
-        sb.append(SerDeUtils.RBRACKET);
-      }
-      break;
-    }
-    case MAP: {
-      MapObjectInspector moi = (MapObjectInspector) oi;
-      ObjectInspector mapKeyObjectInspector = moi.getMapKeyObjectInspector();
-      ObjectInspector mapValueObjectInspector = moi
-          .getMapValueObjectInspector();
-      Map<?, ?> omap = moi.getMap(o);
-      if (omap == null) {
-        sb.append("null");
-      } else {
-        sb.append(SerDeUtils.LBRACE);
-        boolean first = true;
-        for (Object entry : omap.entrySet()) {
-          if (first) {
-            first = false;
-          } else {
-            sb.append(SerDeUtils.COMMA);
-          }
-          Map.Entry<?, ?> e = (Map.Entry<?, ?>) entry;
-          StringBuilder keyBuilder = new StringBuilder();
-          buildJSONString(keyBuilder, e.getKey(), mapKeyObjectInspector);
-          String keyString = keyBuilder.toString().trim();
-          boolean doQuoting =  (!keyString.isEmpty()) && (keyString.charAt(0)!= SerDeUtils.QUOTE);
-          if (doQuoting ){
-            sb.append(SerDeUtils.QUOTE);
-          }
-          sb.append(keyString);
-          if (doQuoting ){
-            sb.append(SerDeUtils.QUOTE);
-          }
-          sb.append(SerDeUtils.COLON);
-          buildJSONString(sb, e.getValue(), mapValueObjectInspector);
-        }
-        sb.append(SerDeUtils.RBRACE);
-      }
-      break;
-    }
-    case STRUCT: {
-      StructObjectInspector soi = (StructObjectInspector) oi;
-      List<? extends StructField> structFields = soi.getAllStructFieldRefs();
-      if (o == null) {
-        sb.append("null");
-      } else {
-        sb.append(SerDeUtils.LBRACE);
-        for (int i = 0; i < structFields.size(); i++) {
-          if (i > 0) {
-            sb.append(SerDeUtils.COMMA);
-          }
-          sb.append(SerDeUtils.QUOTE);
-          sb.append(structFields.get(i).getFieldName());
-          sb.append(SerDeUtils.QUOTE);
-          sb.append(SerDeUtils.COLON);
-          buildJSONString(sb, soi.getStructFieldData(o, structFields.get(i)),
-              structFields.get(i).getFieldObjectInspector());
-        }
-        sb.append(SerDeUtils.RBRACE);
-      }
-      break;
-    }
-    case UNION: {
-      UnionObjectInspector uoi = (UnionObjectInspector) oi;
-      if (o == null) {
-        sb.append("null");
-      } else {
-        sb.append(SerDeUtils.LBRACE);
-        sb.append(uoi.getTag(o));
-        sb.append(SerDeUtils.COLON);
-        buildJSONString(sb, uoi.getField(o),
-              uoi.getObjectInspectors().get(uoi.getTag(o)));
-        sb.append(SerDeUtils.RBRACE);
-      }
-      break;
-    }
-    default:
-      throw new RuntimeException("Unknown type in ObjectInspector!");
-    }
-  }
 
 
-  /**
-   *  Returns an object inspector for the specified schema that
-   *  is capable of reading in the object representation of the JSON string
-   */
-  @Override
-  public ObjectInspector getObjectInspector() throws SerDeException {
-    return cachedObjectInspector;
-  }
+    /**
+     *  Returns an object inspector for the specified schema that
+     *  is capable of reading in the object representation of the JSON string
+     */
+    @Override
+    public ObjectInspector getObjectInspector() throws SerDeException {
+        return cachedObjectInspector;
+    }
 
-  @Override
-  public Class<? extends Writable> getSerializedClass() {
-    return Text.class;
-  }
+    @Override
+    public Class<? extends Writable> getSerializedClass() {
+        return Text.class;
+    }
 
-  @Override
-  public SerDeStats getSerDeStats() {
-    // no support for statistics yet
-    return null;
-  }
+    @Override
+    public SerDeStats getSerDeStats() {
+        // no support for statistics yet
+        return null;
+    }
 
 }
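
For orientation, a minimal round-trip sketch of the JsonSerDe reformatted above. It assumes the class
lives in org.apache.hcatalog.data like the other files in this patch, and that the conventional Hive
table properties "columns" and "columns.types" feed the columnNameProperty/columnTypeProperty handling
visible at the top of this hunk; the demo class name and the sample schema are illustrative only.

import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.io.Text;
import org.apache.hcatalog.data.HCatRecord;
import org.apache.hcatalog.data.JsonSerDe;   // assumed package, matching the rest of this patch

public class JsonSerDeRoundTrip {
    public static void main(String[] args) throws SerDeException {
        Properties tbl = new Properties();
        // Conventional Hive table properties (assumed key names): comma-separated column names,
        // colon-separated Hive type string.
        tbl.setProperty("columns", "id,name,scores");
        tbl.setProperty("columns.types", "int:string:array<double>");

        JsonSerDe serde = new JsonSerDe();
        serde.initialize(new Configuration(), tbl);

        // deserialize(): JSON text -> HCatRecord backed by DefaultHCatRecord.
        Text json = new Text("{\"id\":1,\"name\":\"alice\",\"scores\":[1.5,2.0]}");
        HCatRecord record = (HCatRecord) serde.deserialize(json);
        System.out.println(record.get(1));   // expected: alice

        // serialize(): walks the object through its ObjectInspector and emits JSON
        // with every field name quoted (see buildJSONString above).
        Text back = (Text) serde.serialize(record, serde.getObjectInspector());
        System.out.println(back);            // expected: {"id":1,"name":"alice","scores":[1.5,2.0]}
    }
}
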
diff --git a/src/java/org/apache/hcatalog/data/LazyHCatRecord.java b/src/java/org/apache/hcatalog/data/LazyHCatRecord.java
index 9917f5c..6bcb138 100644
--- a/src/java/org/apache/hcatalog/data/LazyHCatRecord.java
+++ b/src/java/org/apache/hcatalog/data/LazyHCatRecord.java
@@ -40,110 +40,106 @@
  */
 public class LazyHCatRecord extends HCatRecord {
 
-  public static final Logger LOG = LoggerFactory.getLogger(LazyHCatRecord.class.getName());
+    public static final Logger LOG = LoggerFactory.getLogger(LazyHCatRecord.class.getName());
 
-  private Object wrappedObject;
-  private StructObjectInspector soi;
-  
-  @Override
-  public Object get(int fieldNum) {
-    try {
-      StructField fref = soi.getAllStructFieldRefs().get(fieldNum);
-      return HCatRecordSerDe.serializeField(
-          soi.getStructFieldData(wrappedObject, fref),
-          fref.getFieldObjectInspector());
-    } catch (SerDeException e) {
-      throw new IllegalStateException("SerDe Exception deserializing",e);
-    }
-  }
-  
+    private Object wrappedObject;
+    private StructObjectInspector soi;
 
-  @Override
-  public List<Object> getAll() {
-    
-    List<Object> r = new ArrayList<Object>(this.size());
-    for (int i = 0; i < this.size(); i++){
-      r.add(i, get(i));
-    }
-    return r;
-  }
-
-  @Override
-  public void set(int fieldNum, Object value) {
-    throw new UnsupportedOperationException("not allowed to run set() on LazyHCatRecord");
-  }
-
-  @Override
-  public int size() {
-    return soi.getAllStructFieldRefs().size();
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    throw new UnsupportedOperationException("LazyHCatRecord is intended to wrap"
-        + " an object/object inspector as a HCatRecord "
-        + "- it does not need to be read from DataInput.");
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    throw new UnsupportedOperationException("LazyHCatRecord is intended to wrap"
-        + " an object/object inspector as a HCatRecord "
-        + "- it does not need to be written to a DataOutput.");
-  }
-
-  @Override
-  public Object get(String fieldName, HCatSchema recordSchema)
-      throws HCatException {
-    int idx = recordSchema.getPosition(fieldName);
-    return get(idx);
-  }
-
-  @Override
-  public void set(String fieldName, HCatSchema recordSchema, Object value)
-      throws HCatException {
-    throw new UnsupportedOperationException("not allowed to run set() on LazyHCatRecord");
-  }
-
-  @Override
-  public void remove(int idx) throws HCatException {
-    throw new UnsupportedOperationException("not allowed to run remove() on LazyHCatRecord");
-  }
-
-  @Override
-  public void copy(HCatRecord r) throws HCatException {
-    throw new UnsupportedOperationException("not allowed to run copy() on LazyHCatRecord");
-  }
-  
-  public LazyHCatRecord(Object wrappedObject, ObjectInspector oi) throws Exception {
-    if (oi.getCategory() != Category.STRUCT) {
-      throw new SerDeException(getClass().toString() + " can only make a lazy hcat record from " +
-          "objects of struct types, but we got: " + oi.getTypeName());
+    @Override
+    public Object get(int fieldNum) {
+        try {
+            StructField fref = soi.getAllStructFieldRefs().get(fieldNum);
+            return HCatRecordSerDe.serializeField(
+                soi.getStructFieldData(wrappedObject, fref),
+                    fref.getFieldObjectInspector());
+        } catch (SerDeException e) {
+            throw new IllegalStateException("SerDe Exception deserializing", e);
+        }
     }
 
-    this.soi = (StructObjectInspector)oi;
-    this.wrappedObject = wrappedObject;
-  }
-
-  @Override
-  public String toString(){
-    StringBuilder sb = new StringBuilder();
-    for(int i = 0; i< size() ; i++) {
-      sb.append(get(i)+"\t");
+    @Override
+    public List<Object> getAll() {
+        List<Object> r = new ArrayList<Object>(this.size());
+        for (int i = 0; i < this.size(); i++){
+            r.add(i, get(i));
+        }
+        return r;
     }
-    return sb.toString();
-  }
 
-  /**
-   * Convert this LazyHCatRecord to a DefaultHCatRecord.  This is required
-   * before you can write out a record via write.
-   * @return an HCatRecord that can be serialized
-   * @throws HCatException
-   */
-  public HCatRecord getWritable() throws HCatException {
-    DefaultHCatRecord d = new DefaultHCatRecord();
-    d.copy(this);
-    return d;
-  }
+    @Override
+    public void set(int fieldNum, Object value) {
+        throw new UnsupportedOperationException("not allowed to run set() on LazyHCatRecord");
+    }
 
+    @Override
+    public int size() {
+        return soi.getAllStructFieldRefs().size();
+    }
+
+    @Override
+    public void readFields(DataInput in) throws IOException {
+        throw new UnsupportedOperationException("LazyHCatRecord is intended to wrap"
+            + " an object/object inspector as a HCatRecord "
+            + "- it does not need to be read from DataInput.");
+    }
+
+    @Override
+    public void write(DataOutput out) throws IOException {
+        throw new UnsupportedOperationException("LazyHCatRecord is intended to wrap"
+            + " an object/object inspector as a HCatRecord "
+            + "- it does not need to be written to a DataOutput.");
+    }
+
+    @Override
+    public Object get(String fieldName, HCatSchema recordSchema) throws HCatException {
+        int idx = recordSchema.getPosition(fieldName);
+        return get(idx);
+    }
+
+    @Override
+    public void set(String fieldName, HCatSchema recordSchema, Object value) throws HCatException {
+        throw new UnsupportedOperationException("not allowed to run set() on LazyHCatRecord");
+    }
+
+    @Override
+    public void remove(int idx) throws HCatException {
+        throw new UnsupportedOperationException("not allowed to run remove() on LazyHCatRecord");
+    }
+
+    @Override
+    public void copy(HCatRecord r) throws HCatException {
+        throw new UnsupportedOperationException("not allowed to run copy() on LazyHCatRecord");
+    }
+
+    public LazyHCatRecord(Object wrappedObject, ObjectInspector oi) throws Exception {
+        if (oi.getCategory() != Category.STRUCT) {
+            throw new SerDeException(getClass().toString() +
+                " can only make a lazy hcat record from " +
+                "objects of struct types, but we got: " + oi.getTypeName());
+        }
+
+        this.soi = (StructObjectInspector)oi;
+        this.wrappedObject = wrappedObject;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        for (int i = 0; i < size(); i++) {
+            sb.append(get(i) + "\t");
+        }
+        return sb.toString();
+    }
+
+    /**
+     * Convert this LazyHCatRecord to a DefaultHCatRecord.  This is required
+     * before you can write out a record via write.
+     * @return an HCatRecord that can be serialized
+     * @throws HCatException
+     */
+    public HCatRecord getWritable() throws HCatException {
+        DefaultHCatRecord d = new DefaultHCatRecord();
+        d.copy(this);
+        return d;
+    }
 }
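
LazyHCatRecord above defers all field materialization: it holds the deserialized object plus its
StructObjectInspector and only converts a field when get() is called, while mutating calls throw
UnsupportedOperationException and getWritable() copies everything into a DefaultHCatRecord. A small
sketch, assuming Hive's standard ObjectInspectorFactory/PrimitiveObjectInspectorFactory helpers to
build a struct inspector over a plain List; the helper choice and the demo class are not part of
this patch.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hcatalog.data.HCatRecord;
import org.apache.hcatalog.data.LazyHCatRecord;

public class LazyHCatRecordSketch {
    public static void main(String[] args) throws Exception {
        // A standard struct inspector over (id int, name string); Hive's standard
        // struct inspectors read their field data from a plain List<Object>.
        List<ObjectInspector> fieldOIs = new ArrayList<ObjectInspector>();
        fieldOIs.add(PrimitiveObjectInspectorFactory.javaIntObjectInspector);
        fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        StructObjectInspector oi = ObjectInspectorFactory.getStandardStructObjectInspector(
            Arrays.asList("id", "name"), fieldOIs);

        List<Object> row = Arrays.asList((Object) 7, "bob");

        LazyHCatRecord lazy = new LazyHCatRecord(row, oi); // fields are only converted on get()
        System.out.println(lazy.get(1));                   // expected: bob

        HCatRecord writable = lazy.getWritable();          // eager copy into a DefaultHCatRecord
        System.out.println(writable.getAll());             // expected: [7, bob]
    }
}
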
diff --git a/src/java/org/apache/hcatalog/data/Pair.java b/src/java/org/apache/hcatalog/data/Pair.java
index dea9d27..9eb525f 100644
--- a/src/java/org/apache/hcatalog/data/Pair.java
+++ b/src/java/org/apache/hcatalog/data/Pair.java
@@ -42,44 +42,44 @@
      */
     @Override
     public String toString() {
-        return "[" + first.toString() +"," + second.toString() + "]";
+        return "[" + first.toString() + "," + second.toString() + "]";
     }
 
     @Override
     public int hashCode() {
         return (((this.first == null ? 1 : this.first.hashCode()) * 17)
-                + (this.second == null ? 1 : this.second.hashCode()) * 19);
+            + (this.second == null ? 1 : this.second.hashCode()) * 19);
     }
 
     @Override
     public boolean equals(Object other) {
-        if(other == null) {
+        if (other == null) {
             return false;
         }
 
-        if(! (other instanceof Pair)) {
+        if (!(other instanceof Pair)) {
             return false;
         }
 
         Pair otherPair = (Pair) other;
 
-        if(this.first == null) {
-            if(otherPair.first != null) {
+        if (this.first == null) {
+            if (otherPair.first != null) {
                 return false;
             } else {
                 return true;
             }
         }
 
-        if(this.second == null) {
-            if(otherPair.second != null) {
+        if (this.second == null) {
+            if (otherPair.second != null) {
                 return false;
             } else {
                 return true;
             }
         }
 
-        if(this.first.equals(otherPair.first) && this.second.equals(otherPair.second)) {
+        if (this.first.equals(otherPair.first) && this.second.equals(otherPair.second)) {
             return true;
         } else {
             return false;
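
The Pair hunk above only touches spacing around toString/hashCode/equals. For orientation, a tiny
usage sketch; the (first, second) constructor and the generic parameters are assumptions, since only
the three methods shown here appear in this patch.

import org.apache.hcatalog.data.Pair;

public class PairSketch {
    public static void main(String[] args) {
        Pair<String, Integer> a = new Pair<String, Integer>("part", 3);
        Pair<String, Integer> b = new Pair<String, Integer>("part", 3);

        System.out.println(a);                             // [part,3], per toString()
        System.out.println(a.equals(b));                   // true: both fields compare equal
        System.out.println(a.hashCode() == b.hashCode());  // true: the hash mixes both fields
    }
}
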
diff --git a/src/java/org/apache/hcatalog/data/ReaderWriter.java b/src/java/org/apache/hcatalog/data/ReaderWriter.java
index 4210133..79b4035 100644
--- a/src/java/org/apache/hcatalog/data/ReaderWriter.java
+++ b/src/java/org/apache/hcatalog/data/ReaderWriter.java
@@ -34,158 +34,158 @@
 
 public abstract class ReaderWriter {
 
-  private static final String UTF8 = "UTF-8";
+    private static final String UTF8 = "UTF-8";
 
-  public static Object readDatum(DataInput in) throws IOException {
+    public static Object readDatum(DataInput in) throws IOException {
 
-    byte type = in.readByte();
-    switch (type) {
+        byte type = in.readByte();
+        switch (type) {
 
-    case DataType.STRING:
-      byte[] buffer = new byte[in.readInt()];
-      in.readFully(buffer);
-      return new String(buffer,UTF8);
+        case DataType.STRING:
+            byte[] buffer = new byte[in.readInt()];
+            in.readFully(buffer);
+            return new String(buffer, UTF8);
 
-    case DataType.INTEGER:
-      VIntWritable vint = new VIntWritable();
-      vint.readFields(in);
-      return vint.get();
+        case DataType.INTEGER:
+            VIntWritable vint = new VIntWritable();
+            vint.readFields(in);
+            return vint.get();
 
-    case DataType.LONG:
-      VLongWritable vlong = new VLongWritable();
-      vlong.readFields(in);
-      return vlong.get();
+        case DataType.LONG:
+            VLongWritable vlong = new VLongWritable();
+            vlong.readFields(in);
+            return vlong.get();
 
-    case DataType.FLOAT:
-      return in.readFloat();
+        case DataType.FLOAT:
+            return in.readFloat();
 
-    case DataType.DOUBLE:
-      return in.readDouble();
+        case DataType.DOUBLE:
+            return in.readDouble();
 
-    case DataType.BOOLEAN:
-      return in.readBoolean();
+        case DataType.BOOLEAN:
+            return in.readBoolean();
 
-    case DataType.BYTE:
-      return in.readByte();
+        case DataType.BYTE:
+            return in.readByte();
 
-    case DataType.SHORT:
-      return in.readShort();
+        case DataType.SHORT:
+            return in.readShort();
 
-    case DataType.NULL:
-      return null;
-      
-    case DataType.BINARY:
-      int len = in.readInt();
-      byte[] ba = new byte[len];
-      in.readFully(ba);
-      return ba;
-      
-    case DataType.MAP:
-      int size = in.readInt();
-      Map<Object,Object> m = new HashMap<Object, Object>(size);
-      for (int i = 0; i < size; i++) {
-          m.put(readDatum(in), readDatum(in));
-      }
-      return m;
+        case DataType.NULL:
+            return null;
 
-    case DataType.LIST:
-      int sz = in.readInt();
-      List<Object> list = new ArrayList<Object>(sz);
-      for(int i=0; i < sz; i++) {
-        list.add(readDatum(in));
-      }
-      return list;
+        case DataType.BINARY:
+            int len = in.readInt();
+            byte[] ba = new byte[len];
+            in.readFully(ba);
+            return ba;
 
-    default:
-      throw new IOException("Unexpected data type " + type +
-          " found in stream.");
+        case DataType.MAP:
+            int size = in.readInt();
+            Map<Object, Object> m = new HashMap<Object, Object>(size);
+            for (int i = 0; i < size; i++) {
+                m.put(readDatum(in), readDatum(in));
+            }
+            return m;
+
+        case DataType.LIST:
+            int sz = in.readInt();
+            List<Object> list = new ArrayList<Object>(sz);
+            for (int i = 0; i < sz; i++) {
+                list.add(readDatum(in));
+            }
+            return list;
+
+        default:
+            throw new IOException("Unexpected data type " + type +
+                " found in stream.");
+        }
     }
-  }
 
-  public static void writeDatum(DataOutput out, Object val) throws IOException {
-    // write the data type
-    byte type = DataType.findType(val);
-    switch (type) {
-    case DataType.LIST:
-      out.writeByte(DataType.LIST);
-      List<?> list = (List<?>)val;
-      int sz = list.size();
-      out.writeInt(sz);
-      for (int i = 0; i < sz; i++) {
-        writeDatum(out, list.get(i));
-      }
-      return;
+    public static void writeDatum(DataOutput out, Object val) throws IOException {
+        // write the data type
+        byte type = DataType.findType(val);
+        switch (type) {
+        case DataType.LIST:
+            out.writeByte(DataType.LIST);
+            List<?> list = (List<?>) val;
+            int sz = list.size();
+            out.writeInt(sz);
+            for (int i = 0; i < sz; i++) {
+                writeDatum(out, list.get(i));
+            }
+            return;
 
-    case DataType.MAP:
-      out.writeByte(DataType.MAP);
-      Map<?,?> m = (Map<?, ?>)val;
-      out.writeInt(m.size());
-      Iterator<?> i =
-        m.entrySet().iterator();
-      while (i.hasNext()) {
-        Entry<?,?> entry = (Entry<?, ?>) i.next();
-        writeDatum(out, entry.getKey());
-        writeDatum(out, entry.getValue());
-      }
-      return;
+        case DataType.MAP:
+            out.writeByte(DataType.MAP);
+            Map<?, ?> m = (Map<?, ?>) val;
+            out.writeInt(m.size());
+            Iterator<?> i =
+                m.entrySet().iterator();
+            while (i.hasNext()) {
+                Entry<?, ?> entry = (Entry<?, ?>) i.next();
+                writeDatum(out, entry.getKey());
+                writeDatum(out, entry.getValue());
+            }
+            return;
 
-    case DataType.INTEGER:
-      out.writeByte(DataType.INTEGER);
-      new VIntWritable((Integer)val).write(out);
-      return;
+        case DataType.INTEGER:
+            out.writeByte(DataType.INTEGER);
+            new VIntWritable((Integer) val).write(out);
+            return;
 
-    case DataType.LONG:
-      out.writeByte(DataType.LONG);
-      new VLongWritable((Long)val).write(out);
-      return;
+        case DataType.LONG:
+            out.writeByte(DataType.LONG);
+            new VLongWritable((Long) val).write(out);
+            return;
 
-    case DataType.FLOAT:
-      out.writeByte(DataType.FLOAT);
-      out.writeFloat((Float)val);
-      return;
+        case DataType.FLOAT:
+            out.writeByte(DataType.FLOAT);
+            out.writeFloat((Float) val);
+            return;
 
-    case DataType.DOUBLE:
-      out.writeByte(DataType.DOUBLE);
-      out.writeDouble((Double)val);
-      return;
+        case DataType.DOUBLE:
+            out.writeByte(DataType.DOUBLE);
+            out.writeDouble((Double) val);
+            return;
 
-    case DataType.BOOLEAN:
-      out.writeByte(DataType.BOOLEAN);
-      out.writeBoolean((Boolean)val);
-      return;
+        case DataType.BOOLEAN:
+            out.writeByte(DataType.BOOLEAN);
+            out.writeBoolean((Boolean) val);
+            return;
 
-    case DataType.BYTE:
-      out.writeByte(DataType.BYTE);
-      out.writeByte((Byte)val);
-      return;
+        case DataType.BYTE:
+            out.writeByte(DataType.BYTE);
+            out.writeByte((Byte) val);
+            return;
 
-    case DataType.SHORT:
-      out.writeByte(DataType.SHORT);
-      out.writeShort((Short)val);
-      return;
+        case DataType.SHORT:
+            out.writeByte(DataType.SHORT);
+            out.writeShort((Short) val);
+            return;
 
-    case DataType.STRING:
-      String s = (String)val;
-      byte[] utfBytes = s.getBytes(ReaderWriter.UTF8);
-      out.writeByte(DataType.STRING);
-      out.writeInt(utfBytes.length);
-      out.write(utfBytes);
-      return;
+        case DataType.STRING:
+            String s = (String) val;
+            byte[] utfBytes = s.getBytes(ReaderWriter.UTF8);
+            out.writeByte(DataType.STRING);
+            out.writeInt(utfBytes.length);
+            out.write(utfBytes);
+            return;
 
-    case DataType.BINARY:
-      byte[] ba = (byte[])val;
-      out.writeByte(DataType.BINARY);
-      out.writeInt(ba.length);
-      out.write(ba);
-      return;
-      
-    case DataType.NULL:
-      out.writeByte(DataType.NULL);
-      return;
+        case DataType.BINARY:
+            byte[] ba = (byte[]) val;
+            out.writeByte(DataType.BINARY);
+            out.writeInt(ba.length);
+            out.write(ba);
+            return;
 
-    default:
-      throw new IOException("Unexpected data type " + type +
-          " found in stream.");
+        case DataType.NULL:
+            out.writeByte(DataType.NULL);
+            return;
+
+        default:
+            throw new IOException("Unexpected data type " + type +
+                " found in stream.");
+        }
     }
-  }
 }
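
ReaderWriter above implements a small tagged binary format: writeDatum() emits one DataType byte
followed by a type-specific payload (VInt/VLong for integers and longs, length-prefixed bytes for
STRING and BINARY, recursive writeDatum calls for LIST and MAP), and readDatum() dispatches on the
same tag byte to rebuild the value. A round-trip sketch over in-memory streams; only
readDatum/writeDatum come from this patch, while the stream plumbing and sample data are illustrative.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import org.apache.hcatalog.data.ReaderWriter;

public class ReaderWriterRoundTrip {
    public static void main(String[] args) throws IOException {
        Map<Object, Object> m = new HashMap<Object, Object>();
        m.put("lat", 12.97);
        m.put("visits", 42);

        // Encode: each datum becomes a tag byte plus payload; the list and map recurse.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        ReaderWriter.writeDatum(out, Arrays.asList("bangalore", 42L, m, null));
        out.flush();

        // Decode: readDatum reads the tag byte back and reconstructs the nested structure.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        Object decoded = ReaderWriter.readDatum(in);
        System.out.println(decoded); // e.g. [bangalore, 42, {lat=12.97, visits=42}, null] (map order may vary)
    }
}

Because every value is self-describing, nested lists and maps can be read back without any external schema.
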
diff --git a/src/java/org/apache/hcatalog/data/schema/HCatFieldSchema.java b/src/java/org/apache/hcatalog/data/schema/HCatFieldSchema.java
index 39d26a7..720d7b0 100644
--- a/src/java/org/apache/hcatalog/data/schema/HCatFieldSchema.java
+++ b/src/java/org/apache/hcatalog/data/schema/HCatFieldSchema.java
@@ -46,20 +46,22 @@
         STRUCT;
 
         public static Category fromType(Type type) {
-            if (Type.ARRAY == type){
+            if (Type.ARRAY == type) {
                 return ARRAY;
-            }else if(Type.STRUCT == type){
+            } else if (Type.STRUCT == type) {
                 return STRUCT;
-            }else if (Type.MAP == type){
+            } else if (Type.MAP == type) {
                 return MAP;
-            }else{
+            } else {
                 return PRIMITIVE;
             }
         }
-    };
+    }
 
-    public boolean isComplex(){
-      return (category == Category.PRIMITIVE) ? false : true;
+    ;
+
+    public boolean isComplex() {
+        return (category == Category.PRIMITIVE) ? false : true;
     }
 
     /**
@@ -84,7 +86,7 @@
     private String typeString = null;
 
     @SuppressWarnings("unused")
-    private HCatFieldSchema(){
+    private HCatFieldSchema() {
         // preventing empty ctor from being callable
     }
 
@@ -92,7 +94,7 @@
      * Returns type of the field
      * @return type of the field
      */
-    public Type getType(){
+    public Type getType() {
         return type;
     }
 
@@ -100,7 +102,7 @@
      * Returns category of the field
      * @return category of the field
      */
-    public Category getCategory(){
+    public Category getCategory() {
         return category;
     }
 
@@ -108,11 +110,11 @@
      * Returns name of the field
      * @return name of the field
      */
-    public String getName(){
+    public String getName() {
         return fieldName;
     }
 
-    public String getComment(){
+    public String getComment() {
         return comment;
     }
 
@@ -123,7 +125,7 @@
      * @throws HCatException if call made on non-primitive types
      */
     public HCatFieldSchema(String fieldName, Type type, String comment) throws HCatException {
-        assertTypeInCategory(type,Category.PRIMITIVE,fieldName);
+        assertTypeInCategory(type, Category.PRIMITIVE, fieldName);
         this.fieldName = fieldName;
         this.type = type;
         this.category = Category.PRIMITIVE;
@@ -137,21 +139,21 @@
      * @param subSchema - subschema of the struct, or element schema of the elements in the array
      * @throws HCatException if call made on Primitive or Map types
      */
-    public HCatFieldSchema(String fieldName, Type type, HCatSchema subSchema,String comment) throws HCatException{
-        assertTypeNotInCategory(type,Category.PRIMITIVE);
-        assertTypeNotInCategory(type,Category.MAP);
+    public HCatFieldSchema(String fieldName, Type type, HCatSchema subSchema, String comment) throws HCatException {
+        assertTypeNotInCategory(type, Category.PRIMITIVE);
+        assertTypeNotInCategory(type, Category.MAP);
         this.fieldName = fieldName;
         this.type = type;
         this.category = Category.fromType(type);
         this.subSchema = subSchema;
-        if(type == Type.ARRAY){
-         this.subSchema.get(0).setName(null);
+        if (type == Type.ARRAY) {
+            this.subSchema.get(0).setName(null);
         }
         this.comment = comment;
     }
 
     private void setName(String name) {
-      this.fieldName = name;
+        this.fieldName = name;
     }
 
     /**
@@ -162,9 +164,9 @@
      * @param mapValueSchema - subschema of the value of the Map
      * @throws HCatException if call made on non-Map types
      */
-    public HCatFieldSchema(String fieldName, Type type, Type mapKeyType, HCatSchema mapValueSchema, String comment) throws HCatException{
-        assertTypeInCategory(type,Category.MAP, fieldName);
-        assertTypeInCategory(mapKeyType,Category.PRIMITIVE, fieldName);
+    public HCatFieldSchema(String fieldName, Type type, Type mapKeyType, HCatSchema mapValueSchema, String comment) throws HCatException {
+        assertTypeInCategory(type, Category.MAP, fieldName);
+        assertTypeInCategory(mapKeyType, Category.PRIMITIVE, fieldName);
         this.fieldName = fieldName;
         this.type = Type.MAP;
         this.category = Category.MAP;
@@ -175,66 +177,66 @@
     }
 
     public HCatSchema getStructSubSchema() throws HCatException {
-        assertTypeInCategory(this.type,Category.STRUCT, this.fieldName);
+        assertTypeInCategory(this.type, Category.STRUCT, this.fieldName);
         return subSchema;
     }
 
     public HCatSchema getArrayElementSchema() throws HCatException {
-        assertTypeInCategory(this.type,Category.ARRAY, this.fieldName);
+        assertTypeInCategory(this.type, Category.ARRAY, this.fieldName);
         return subSchema;
     }
 
     public Type getMapKeyType() throws HCatException {
-        assertTypeInCategory(this.type,Category.MAP, this.fieldName);
+        assertTypeInCategory(this.type, Category.MAP, this.fieldName);
         return mapKeyType;
     }
 
     public HCatSchema getMapValueSchema() throws HCatException {
-        assertTypeInCategory(this.type,Category.MAP, this.fieldName);
+        assertTypeInCategory(this.type, Category.MAP, this.fieldName);
         return subSchema;
     }
 
     private static void assertTypeInCategory(Type type, Category category, String fieldName) throws HCatException {
         Category typeCategory = Category.fromType(type);
-        if (typeCategory != category){
-            throw new HCatException("Type category mismatch. Expected "+category+" but type "+type+" in category "+typeCategory+ " (field "+fieldName+")");
+        if (typeCategory != category) {
+            throw new HCatException("Type category mismatch. Expected " + category + " but type " + type + " in category " + typeCategory + " (field " + fieldName + ")");
         }
     }
 
     private static void assertTypeNotInCategory(Type type, Category category) throws HCatException {
         Category typeCategory = Category.fromType(type);
-        if (typeCategory == category){
-            throw new HCatException("Type category mismatch. Expected type "+type+" not in category "+category+" but was so.");
+        if (typeCategory == category) {
+            throw new HCatException("Type category mismatch. Expected type " + type + " not in category " + category + " but was so.");
         }
     }
 
     @Override
     public String toString() {
-      return new ToStringBuilder(this)
-          .append("fieldName", fieldName)
-          .append("comment", comment)
-          .append("type", getTypeString())
-          .append("category", category)
-          .toString();
+        return new ToStringBuilder(this)
+            .append("fieldName", fieldName)
+            .append("comment", comment)
+            .append("type", getTypeString())
+            .append("category", category)
+            .toString();
     }
 
-    public String getTypeString(){
-        if (typeString != null){
+    public String getTypeString() {
+        if (typeString != null) {
             return typeString;
         }
 
         StringBuilder sb = new StringBuilder();
-        if (Category.PRIMITIVE == category){
+        if (Category.PRIMITIVE == category) {
             sb.append(type);
-        }else if (Category.STRUCT == category){
+        } else if (Category.STRUCT == category) {
             sb.append("struct<");
             sb.append(subSchema.getSchemaAsTypeString());
             sb.append(">");
-        }else if (Category.ARRAY == category){
+        } else if (Category.ARRAY == category) {
             sb.append("array<");
             sb.append(subSchema.getSchemaAsTypeString());
             sb.append(">");
-        }else if (Category.MAP == category){
+        } else if (Category.MAP == category) {
             sb.append("map<");
             sb.append(mapKeyType);
             sb.append(",");
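
HCatFieldSchema above keeps one constructor per category: (name, primitive type, comment) for
primitives, (name, type, sub-schema, comment) for struct/array, and (name, MAP, key type, value
schema, comment) for maps, with assertTypeInCategory/assertTypeNotInCategory guarding misuse. A small
construction sketch; it assumes HCatException lives in org.apache.hcatalog.common and that a map's
value schema wraps a single, possibly unnamed, field; the demo class and field names are illustrative.

import java.util.ArrayList;
import java.util.List;

import org.apache.hcatalog.common.HCatException;   // assumed package for HCatException
import org.apache.hcatalog.data.schema.HCatFieldSchema;
import org.apache.hcatalog.data.schema.HCatFieldSchema.Type;
import org.apache.hcatalog.data.schema.HCatSchema;

public class FieldSchemaSketch {
    public static void main(String[] args) throws HCatException {
        // Primitive field: (name, primitive type, comment).
        HCatFieldSchema id = new HCatFieldSchema("id", Type.INT, "row id");

        // Map field: (name, MAP, key type, value schema, comment); the value schema holds
        // a single field describing the map's values (assumed convention, name left null).
        List<HCatFieldSchema> valueFields = new ArrayList<HCatFieldSchema>();
        valueFields.add(new HCatFieldSchema(null, Type.STRING, null));
        HCatFieldSchema props =
            new HCatFieldSchema("props", Type.MAP, Type.STRING, new HCatSchema(valueFields), null);

        System.out.println(id.getTypeString());       // primitive case simply prints the Type enum
        System.out.println(props.getCategory());      // MAP
        System.out.println(props.getMapKeyType());    // STRING
        System.out.println(props.getMapValueSchema().getFields().size()); // 1 value-side field
    }
}
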
diff --git a/src/java/org/apache/hcatalog/data/schema/HCatSchema.java b/src/java/org/apache/hcatalog/data/schema/HCatSchema.java
index b93a1b6..a18cb75 100644
--- a/src/java/org/apache/hcatalog/data/schema/HCatSchema.java
+++ b/src/java/org/apache/hcatalog/data/schema/HCatSchema.java
@@ -30,12 +30,12 @@
  * HCatSchema. This class is NOT thread-safe.
  */
 
-public class HCatSchema implements Serializable{
+public class HCatSchema implements Serializable {
 
     private static final long serialVersionUID = 1L;
 
     private final List<HCatFieldSchema> fieldSchemas;
-    private final Map<String,Integer> fieldPositionMap;
+    private final Map<String, Integer> fieldPositionMap;
     private final List<String> fieldNames;
 
     /**
@@ -44,44 +44,44 @@
      * on fieldSchemas won't get reflected in HCatSchema.  Each fieldSchema's name
      * in the list must be unique, otherwise throws IllegalArgumentException.
      */
-    public HCatSchema(final List<HCatFieldSchema> fieldSchemas){
+    public HCatSchema(final List<HCatFieldSchema> fieldSchemas) {
         this.fieldSchemas = new ArrayList<HCatFieldSchema>(fieldSchemas);
         int idx = 0;
-        fieldPositionMap = new HashMap<String,Integer>();
+        fieldPositionMap = new HashMap<String, Integer>();
         fieldNames = new ArrayList<String>();
-        for (HCatFieldSchema field : fieldSchemas){
-            if(field == null)
+        for (HCatFieldSchema field : fieldSchemas) {
+            if (field == null)
                 throw new IllegalArgumentException("Field cannot be null");
 
             String fieldName = field.getName();
-            if(fieldPositionMap.containsKey(fieldName))
+            if (fieldPositionMap.containsKey(fieldName))
                 throw new IllegalArgumentException("Field named " + fieldName +
-                                                   " already exists");
+                    " already exists");
             fieldPositionMap.put(fieldName, idx);
             fieldNames.add(fieldName);
             idx++;
         }
     }
 
-    public void append(final HCatFieldSchema hfs) throws HCatException{
-      if(hfs == null)
-        throw new HCatException("Attempt to append null HCatFieldSchema in HCatSchema.");
+    public void append(final HCatFieldSchema hfs) throws HCatException {
+        if (hfs == null)
+            throw new HCatException("Attempt to append null HCatFieldSchema in HCatSchema.");
 
-      String fieldName = hfs.getName();
-      if(fieldPositionMap.containsKey(fieldName))
-        throw new HCatException("Attempt to append HCatFieldSchema with already " +
-            "existing name: " + fieldName + ".");
+        String fieldName = hfs.getName();
+        if (fieldPositionMap.containsKey(fieldName))
+            throw new HCatException("Attempt to append HCatFieldSchema with already " +
+                "existing name: " + fieldName + ".");
 
-      this.fieldSchemas.add(hfs);
-      this.fieldNames.add(fieldName);
-      this.fieldPositionMap.put(fieldName, this.size()-1);
+        this.fieldSchemas.add(hfs);
+        this.fieldNames.add(fieldName);
+        this.fieldPositionMap.put(fieldName, this.size() - 1);
     }
 
     /**
      *  Users are not allowed to modify the list directly, since HCatSchema
      *  maintains internal state. Use append/remove to modify the schema.
      */
-    public List<HCatFieldSchema> getFields(){
+    public List<HCatFieldSchema> getFields() {
         return Collections.unmodifiableList(this.fieldSchemas);
     }
 
@@ -91,14 +91,14 @@
      * present, returns null.
      */
     public Integer getPosition(String fieldName) {
-      return fieldPositionMap.get(fieldName);
+        return fieldPositionMap.get(fieldName);
     }
 
     public HCatFieldSchema get(String fieldName) throws HCatException {
         return get(getPosition(fieldName));
     }
 
-    public List<String> getFieldNames(){
+    public List<String> getFieldNames() {
         return this.fieldNames;
     }
 
@@ -106,32 +106,32 @@
         return fieldSchemas.get(position);
     }
 
-    public int size(){
-      return fieldSchemas.size();
+    public int size() {
+        return fieldSchemas.size();
     }
 
     public void remove(final HCatFieldSchema hcatFieldSchema) throws HCatException {
 
-      if(!fieldSchemas.contains(hcatFieldSchema)){
-        throw new HCatException("Attempt to delete a non-existent column from HCat Schema: "+ hcatFieldSchema);
-      }
+        if (!fieldSchemas.contains(hcatFieldSchema)) {
+            throw new HCatException("Attempt to delete a non-existent column from HCat Schema: " + hcatFieldSchema);
+        }
 
-      fieldSchemas.remove(hcatFieldSchema);
-      fieldPositionMap.remove(hcatFieldSchema);
-      fieldNames.remove(hcatFieldSchema.getName());
+        fieldSchemas.remove(hcatFieldSchema);
+        fieldPositionMap.remove(hcatFieldSchema);
+        fieldNames.remove(hcatFieldSchema.getName());
     }
 
     @Override
     public String toString() {
         boolean first = true;
         StringBuilder sb = new StringBuilder();
-        for (HCatFieldSchema hfs : fieldSchemas){
-            if (!first){
+        for (HCatFieldSchema hfs : fieldSchemas) {
+            if (!first) {
                 sb.append(",");
-            }else{
+            } else {
                 first = false;
             }
-            if (hfs.getName() != null){
+            if (hfs.getName() != null) {
                 sb.append(hfs.getName());
                 sb.append(":");
             }
@@ -140,16 +140,16 @@
         return sb.toString();
     }
 
-    public String getSchemaAsTypeString(){
+    public String getSchemaAsTypeString() {
         boolean first = true;
         StringBuilder sb = new StringBuilder();
-        for (HCatFieldSchema hfs : fieldSchemas){
-            if (!first){
+        for (HCatFieldSchema hfs : fieldSchemas) {
+            if (!first) {
                 sb.append(",");
-            }else{
+            } else {
                 first = false;
             }
-            if (hfs.getName() != null){
+            if (hfs.getName() != null) {
                 sb.append(hfs.getName());
                 sb.append(":");
             }
@@ -170,7 +170,7 @@
             return false;
         }
         HCatSchema other = (HCatSchema) obj;
-       if (!this.getFields().equals(other.getFields())) {
+        if (!this.getFields().equals(other.getFields())) {
             return false;
         }
         return true;
diff --git a/src/java/org/apache/hcatalog/data/schema/HCatSchemaUtils.java b/src/java/org/apache/hcatalog/data/schema/HCatSchemaUtils.java
index ca7afa0..38a5fa9 100644
--- a/src/java/org/apache/hcatalog/data/schema/HCatSchemaUtils.java
+++ b/src/java/org/apache/hcatalog/data/schema/HCatSchemaUtils.java
@@ -38,15 +38,15 @@
 
 public class HCatSchemaUtils {
 
-    public static CollectionBuilder getStructSchemaBuilder(){
+    public static CollectionBuilder getStructSchemaBuilder() {
         return new CollectionBuilder();
     }
 
-    public static CollectionBuilder getListSchemaBuilder(){
+    public static CollectionBuilder getListSchemaBuilder() {
         return new CollectionBuilder();
     }
 
-    public static MapBuilder getMapSchemaBuilder(){
+    public static MapBuilder getMapSchemaBuilder() {
         return new MapBuilder();
     }
 
@@ -58,21 +58,21 @@
     public static class CollectionBuilder extends HCatSchemaBuilder { // for STRUCTS(multiple-add-calls) and LISTS(single-add-call)
         List<HCatFieldSchema> fieldSchemas = null;
 
-        CollectionBuilder(){
+        CollectionBuilder() {
             fieldSchemas = new ArrayList<HCatFieldSchema>();
         }
 
-        public CollectionBuilder addField(FieldSchema fieldSchema) throws HCatException{
+        public CollectionBuilder addField(FieldSchema fieldSchema) throws HCatException {
             return this.addField(getHCatFieldSchema(fieldSchema));
         }
 
-        public CollectionBuilder addField(HCatFieldSchema fieldColumnSchema){
+        public CollectionBuilder addField(HCatFieldSchema fieldColumnSchema) {
             fieldSchemas.add(fieldColumnSchema);
             return this;
         }
 
         @Override
-        public HCatSchema build() throws HCatException{
+        public HCatSchema build() throws HCatException {
             return new HCatSchema(fieldSchemas);
         }
 
@@ -86,7 +86,7 @@
         @Override
         public HCatSchema build() throws HCatException {
             List<HCatFieldSchema> fslist = new ArrayList<HCatFieldSchema>();
-            fslist.add(new HCatFieldSchema(null,Type.MAP,keyType,valueSchema,null));
+            fslist.add(new HCatFieldSchema(null, Type.MAP, keyType, valueSchema, null));
             return new HCatSchema(fslist);
         }
 
@@ -118,26 +118,26 @@
     private static HCatFieldSchema getHCatFieldSchema(String fieldName, TypeInfo fieldTypeInfo) throws HCatException {
         Category typeCategory = fieldTypeInfo.getCategory();
         HCatFieldSchema hCatFieldSchema;
-        if (Category.PRIMITIVE == typeCategory){
-            hCatFieldSchema = new HCatFieldSchema(fieldName,getPrimitiveHType(fieldTypeInfo),null);
+        if (Category.PRIMITIVE == typeCategory) {
+            hCatFieldSchema = new HCatFieldSchema(fieldName, getPrimitiveHType(fieldTypeInfo), null);
         } else if (Category.STRUCT == typeCategory) {
-            HCatSchema subSchema = constructHCatSchema((StructTypeInfo)fieldTypeInfo);
-            hCatFieldSchema = new HCatFieldSchema(fieldName,HCatFieldSchema.Type.STRUCT,subSchema,null);
+            HCatSchema subSchema = constructHCatSchema((StructTypeInfo) fieldTypeInfo);
+            hCatFieldSchema = new HCatFieldSchema(fieldName, HCatFieldSchema.Type.STRUCT, subSchema, null);
         } else if (Category.LIST == typeCategory) {
-            HCatSchema subSchema = getHCatSchema(((ListTypeInfo)fieldTypeInfo).getListElementTypeInfo());
-            hCatFieldSchema = new HCatFieldSchema(fieldName,HCatFieldSchema.Type.ARRAY,subSchema,null);
+            HCatSchema subSchema = getHCatSchema(((ListTypeInfo) fieldTypeInfo).getListElementTypeInfo());
+            hCatFieldSchema = new HCatFieldSchema(fieldName, HCatFieldSchema.Type.ARRAY, subSchema, null);
         } else if (Category.MAP == typeCategory) {
-            HCatFieldSchema.Type mapKeyType =  getPrimitiveHType(((MapTypeInfo)fieldTypeInfo).getMapKeyTypeInfo());
-            HCatSchema subSchema = getHCatSchema(((MapTypeInfo)fieldTypeInfo).getMapValueTypeInfo());
-            hCatFieldSchema = new HCatFieldSchema(fieldName,HCatFieldSchema.Type.MAP,mapKeyType,subSchema,null);
-        } else{
-            throw new TypeNotPresentException(fieldTypeInfo.getTypeName(),null);
+            HCatFieldSchema.Type mapKeyType = getPrimitiveHType(((MapTypeInfo) fieldTypeInfo).getMapKeyTypeInfo());
+            HCatSchema subSchema = getHCatSchema(((MapTypeInfo) fieldTypeInfo).getMapValueTypeInfo());
+            hCatFieldSchema = new HCatFieldSchema(fieldName, HCatFieldSchema.Type.MAP, mapKeyType, subSchema, null);
+        } else {
+            throw new TypeNotPresentException(fieldTypeInfo.getTypeName(), null);
         }
         return hCatFieldSchema;
     }
 
     private static Type getPrimitiveHType(TypeInfo basePrimitiveTypeInfo) {
-        switch(((PrimitiveTypeInfo)basePrimitiveTypeInfo).getPrimitiveCategory()) {
+        switch (((PrimitiveTypeInfo) basePrimitiveTypeInfo).getPrimitiveCategory()) {
         case BOOLEAN:
             return HCatContext.getInstance().getConf().getBoolean(
                 HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER,
@@ -165,17 +165,17 @@
         case BINARY:
             return Type.BINARY;
         default:
-            throw new TypeNotPresentException(((PrimitiveTypeInfo)basePrimitiveTypeInfo).getTypeName(), null);
+            throw new TypeNotPresentException(((PrimitiveTypeInfo) basePrimitiveTypeInfo).getTypeName(), null);
         }
     }
 
-    public static HCatSchema getHCatSchema(Schema schema) throws HCatException{
+    public static HCatSchema getHCatSchema(Schema schema) throws HCatException {
         return getHCatSchema(schema.getFieldSchemas());
     }
 
-    public static HCatSchema getHCatSchema(List<? extends FieldSchema> fslist) throws HCatException{
+    public static HCatSchema getHCatSchema(List<? extends FieldSchema> fslist) throws HCatException {
         CollectionBuilder builder = getStructSchemaBuilder();
-        for (FieldSchema fieldSchema : fslist){
+        for (FieldSchema fieldSchema : fslist) {
             builder.addField(fieldSchema);
         }
         return builder.build();
@@ -183,8 +183,8 @@
 
     private static HCatSchema constructHCatSchema(StructTypeInfo stypeInfo) throws HCatException {
         CollectionBuilder builder = getStructSchemaBuilder();
-        for (String fieldName : ((StructTypeInfo)stypeInfo).getAllStructFieldNames()){
-            builder.addField(getHCatFieldSchema(fieldName,((StructTypeInfo)stypeInfo).getStructFieldTypeInfo(fieldName)));
+        for (String fieldName : ((StructTypeInfo) stypeInfo).getAllStructFieldNames()) {
+            builder.addField(getHCatFieldSchema(fieldName, ((StructTypeInfo) stypeInfo).getStructFieldTypeInfo(fieldName)));
         }
         return builder.build();
     }
@@ -192,22 +192,22 @@
     public static HCatSchema getHCatSchema(TypeInfo typeInfo) throws HCatException {
         Category typeCategory = typeInfo.getCategory();
         HCatSchema hCatSchema;
-        if (Category.PRIMITIVE == typeCategory){
-            hCatSchema = getStructSchemaBuilder().addField(new HCatFieldSchema(null,getPrimitiveHType(typeInfo),null)).build();
+        if (Category.PRIMITIVE == typeCategory) {
+            hCatSchema = getStructSchemaBuilder().addField(new HCatFieldSchema(null, getPrimitiveHType(typeInfo), null)).build();
         } else if (Category.STRUCT == typeCategory) {
             HCatSchema subSchema = constructHCatSchema((StructTypeInfo) typeInfo);
-            hCatSchema = getStructSchemaBuilder().addField(new HCatFieldSchema(null,Type.STRUCT,subSchema,null)).build();
+            hCatSchema = getStructSchemaBuilder().addField(new HCatFieldSchema(null, Type.STRUCT, subSchema, null)).build();
         } else if (Category.LIST == typeCategory) {
             CollectionBuilder builder = getListSchemaBuilder();
-            builder.addField(getHCatFieldSchema(null,((ListTypeInfo)typeInfo).getListElementTypeInfo()));
-            hCatSchema = new HCatSchema(Arrays.asList(new HCatFieldSchema("",Type.ARRAY, builder.build(), "")));
+            builder.addField(getHCatFieldSchema(null, ((ListTypeInfo) typeInfo).getListElementTypeInfo()));
+            hCatSchema = new HCatSchema(Arrays.asList(new HCatFieldSchema("", Type.ARRAY, builder.build(), "")));
         } else if (Category.MAP == typeCategory) {
-            HCatFieldSchema.Type mapKeyType =  getPrimitiveHType(((MapTypeInfo)typeInfo).getMapKeyTypeInfo());
-            HCatSchema subSchema = getHCatSchema(((MapTypeInfo)typeInfo).getMapValueTypeInfo());
+            HCatFieldSchema.Type mapKeyType = getPrimitiveHType(((MapTypeInfo) typeInfo).getMapKeyTypeInfo());
+            HCatSchema subSchema = getHCatSchema(((MapTypeInfo) typeInfo).getMapValueTypeInfo());
             MapBuilder builder = getMapSchemaBuilder();
             hCatSchema = builder.withKeyType(mapKeyType).withValueSchema(subSchema).build();
-        } else{
-            throw new TypeNotPresentException(typeInfo.getTypeName(),null);
+        } else {
+            throw new TypeNotPresentException(typeInfo.getTypeName(), null);
         }
         return hCatSchema;
     }
@@ -217,20 +217,20 @@
     }
 
     public static HCatSchema getHCatSchema(String schemaString) throws HCatException {
-        if ((schemaString == null) || (schemaString.trim().isEmpty())){
+        if ((schemaString == null) || (schemaString.trim().isEmpty())) {
             return new HCatSchema(new ArrayList<HCatFieldSchema>()); // empty HSchema construct
         }
-        HCatSchema outerSchema = getHCatSchemaFromTypeString("struct<"+schemaString+">");
+        HCatSchema outerSchema = getHCatSchemaFromTypeString("struct<" + schemaString + ">");
         return outerSchema.get(0).getStructSubSchema();
     }
 
-    public static FieldSchema getFieldSchema(HCatFieldSchema hcatFieldSchema){
-        return new FieldSchema(hcatFieldSchema.getName(),hcatFieldSchema.getTypeString(),hcatFieldSchema.getComment());
+    public static FieldSchema getFieldSchema(HCatFieldSchema hcatFieldSchema) {
+        return new FieldSchema(hcatFieldSchema.getName(), hcatFieldSchema.getTypeString(), hcatFieldSchema.getComment());
     }
 
-    public static List<FieldSchema> getFieldSchemas(List<HCatFieldSchema> hcatFieldSchemas){
+    public static List<FieldSchema> getFieldSchemas(List<HCatFieldSchema> hcatFieldSchemas) {
         List<FieldSchema> lfs = new ArrayList<FieldSchema>();
-        for (HCatFieldSchema hfs : hcatFieldSchemas){
+        for (HCatFieldSchema hfs : hcatFieldSchemas) {
             lfs.add(getFieldSchema(hfs));
         }
         return lfs;
diff --git a/src/java/org/apache/hcatalog/data/transfer/DataTransferFactory.java b/src/java/org/apache/hcatalog/data/transfer/DataTransferFactory.java
index 0f1a39c..f10ec8c 100644
--- a/src/java/org/apache/hcatalog/data/transfer/DataTransferFactory.java
+++ b/src/java/org/apache/hcatalog/data/transfer/DataTransferFactory.java
@@ -34,102 +34,102 @@
 
 public class DataTransferFactory {
 
-  /**
-   * This should be called once from master node to obtain an instance of
-   * {@link HCatReader}.
-   * 
-   * @param re
-   *          ReadEntity built using {@link ReadEntity.Builder}
-   * @param config
-   *          any configuration which master node wants to pass to HCatalog
-   * @return {@link HCatReader}
-   */
-  public static HCatReader getHCatReader(final ReadEntity re,
-      final Map<String, String> config) {
-    // In future, this may examine ReadEntity and/or config to return
-    // appropriate HCatReader
-    return new HCatInputFormatReader(re, config);
-  }
+    /**
+     * This should be called once from master node to obtain an instance of
+     * {@link HCatReader}.
+     *
+     * @param re
+     *          ReadEntity built using {@link ReadEntity.Builder}
+     * @param config
+     *          any configuration which master node wants to pass to HCatalog
+     * @return {@link HCatReader}
+     */
+    public static HCatReader getHCatReader(final ReadEntity re,
+                                           final Map<String, String> config) {
+        // In future, this may examine ReadEntity and/or config to return
+        // appropriate HCatReader
+        return new HCatInputFormatReader(re, config);
+    }
 
-  /**
-   * This should only be called once from every slave node to obtain an instance
-   * of {@link HCatReader}.
-   * 
-   * @param split
-   *          input split obtained at master node
-   * @param config
-   *          configuration obtained at master node
-   * @return {@link HCatReader}
-   */
-  public static HCatReader getHCatReader(final InputSplit split,
-      final Configuration config) {
-    // In future, this may examine config to return appropriate HCatReader
-    return getHCatReader(split, config, DefaultStateProvider.get());
-  }
+    /**
+     * This should only be called once from every slave node to obtain an instance
+     * of {@link HCatReader}.
+     *
+     * @param split
+     *          input split obtained at master node
+     * @param config
+     *          configuration obtained at master node
+     * @return {@link HCatReader}
+     */
+    public static HCatReader getHCatReader(final InputSplit split,
+                                           final Configuration config) {
+        // In future, this may examine config to return appropriate HCatReader
+        return getHCatReader(split, config, DefaultStateProvider.get());
+    }
 
-  /**
-   * This should only be called once from every slave node to obtain an instance
-   * of {@link HCatReader}. This should be called if an external system has some
-   * state to provide to HCatalog.
-   * 
-   * @param split
-   *          input split obtained at master node
-   * @param config
-   *          configuration obtained at master node
-   * @param sp
-   *          {@link StateProvider}
-   * @return {@link HCatReader}
-   */
-  public static HCatReader getHCatReader(final InputSplit split,
-      final Configuration config, StateProvider sp) {
-    // In future, this may examine config to return appropriate HCatReader
-    return new HCatInputFormatReader(split, config, sp);
-  }
+    /**
+     * This should only be called once from every slave node to obtain an instance
+     * of {@link HCatReader}. This should be called if an external system has some
+     * state to provide to HCatalog.
+     *
+     * @param split
+     *          input split obtained at master node
+     * @param config
+     *          configuration obtained at master node
+     * @param sp
+     *          {@link StateProvider}
+     * @return {@link HCatReader}
+     */
+    public static HCatReader getHCatReader(final InputSplit split,
+                                           final Configuration config, StateProvider sp) {
+        // In future, this may examine config to return appropriate HCatReader
+        return new HCatInputFormatReader(split, config, sp);
+    }
 
-  /**
-   * This should be called at master node to obtain an instance of
-   * {@link HCatWriter}.
-   * 
-   * @param we
-   *          WriteEntity built using {@link WriteEntity.Builder}
-   * @param config
-   *          any configuration which master wants to pass to HCatalog
-   * @return {@link HCatWriter}
-   */
-  public static HCatWriter getHCatWriter(final WriteEntity we,
-      final Map<String, String> config) {
-    // In future, this may examine WriteEntity and/or config to return
-    // appropriate HCatWriter
-    return new HCatOutputFormatWriter(we, config);
-  }
+    /**
+     * This should be called at master node to obtain an instance of
+     * {@link HCatWriter}.
+     *
+     * @param we
+     *          WriteEntity built using {@link WriteEntity.Builder}
+     * @param config
+     *          any configuration which master wants to pass to HCatalog
+     * @return {@link HCatWriter}
+     */
+    public static HCatWriter getHCatWriter(final WriteEntity we,
+                                           final Map<String, String> config) {
+        // In future, this may examine WriteEntity and/or config to return
+        // appropriate HCatWriter
+        return new HCatOutputFormatWriter(we, config);
+    }
 
-  /**
-   * This should be called at slave nodes to obtain an instance of
-   * {@link HCatWriter}.
-   * 
-   * @param cntxt
-   *          {@link WriterContext} obtained at master node
-   * @return {@link HCatWriter}
-   */
-  public static HCatWriter getHCatWriter(final WriterContext cntxt) {
-    // In future, this may examine context to return appropriate HCatWriter
-    return getHCatWriter(cntxt, DefaultStateProvider.get());
-  }
+    /**
+     * This should be called at slave nodes to obtain an instance of
+     * {@link HCatWriter}.
+     *
+     * @param cntxt
+     *          {@link WriterContext} obtained at master node
+     * @return {@link HCatWriter}
+     */
+    public static HCatWriter getHCatWriter(final WriterContext cntxt) {
+        // In future, this may examine context to return appropriate HCatWriter
+        return getHCatWriter(cntxt, DefaultStateProvider.get());
+    }
 
-  /**
-   * This should be called at slave nodes to obtain an instance of
-   * {@link HCatWriter}. If an external system has some mechanism for providing
-   * state to HCatalog, this constructor can be used.
-   * 
-   * @param cntxt
-   *          {@link WriterContext} obtained at master node
-   * @param sp
-   *          {@link StateProvider}
-   * @return {@link HCatWriter}
-   */
-  public static HCatWriter getHCatWriter(final WriterContext cntxt,
-      final StateProvider sp) {
-    // In future, this may examine context to return appropriate HCatWriter
-    return new HCatOutputFormatWriter(cntxt.getConf(), sp);
-  }
+    /**
+     * This should be called at slave nodes to obtain an instance of
+     * {@link HCatWriter}. If an external system has some mechanism for providing
+     * state to HCatalog, this constructor can be used.
+     *
+     * @param cntxt
+     *          {@link WriterContext} obtained at master node
+     * @param sp
+     *          {@link StateProvider}
+     * @return {@link HCatWriter}
+     */
+    public static HCatWriter getHCatWriter(final WriterContext cntxt,
+                                           final StateProvider sp) {
+        // In future, this may examine context to return appropriate HCatWriter
+        return new HCatOutputFormatWriter(cntxt.getConf(), sp);
+    }
 }
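
A minimal sketch of how the reader entry points reformatted above are meant to be combined, based only on the Javadoc and signatures visible in this diff: prepare the read once at the master, ship the serialized ReaderContext to the slaves, and have each slave read its split. The database and table names ("mydb", "mytable") are placeholder assumptions and error handling is omitted; this is illustrative only, not part of the patch.

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hcatalog.data.HCatRecord;
import org.apache.hcatalog.data.transfer.DataTransferFactory;
import org.apache.hcatalog.data.transfer.HCatReader;
import org.apache.hcatalog.data.transfer.ReadEntity;
import org.apache.hcatalog.data.transfer.ReaderContext;

// Illustrative sketch: master-side prepare, slave-side read, using only the
// DataTransferFactory methods shown above. "mydb"/"mytable" are placeholders.
public class ReadFlowSketch {
    public static void main(String[] args) throws Exception {
        // Master node: describe the table to read and prepare the read.
        ReadEntity entity = new ReadEntity.Builder()
            .withDatabase("mydb")
            .withTable("mytable")
            .build();
        Map<String, String> config = new HashMap<String, String>();
        HCatReader master = DataTransferFactory.getHCatReader(entity, config);
        ReaderContext context = master.prepareRead(); // serialize and ship to slaves

        // Slave nodes: each reads the split(s) it was handed.
        for (InputSplit split : context.getSplits()) {
            HCatReader slave = DataTransferFactory.getHCatReader(split, context.getConf());
            Iterator<HCatRecord> records = slave.read();
            while (records.hasNext()) {
                HCatRecord record = records.next();
                // process record ...
            }
        }
    }
}
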
diff --git a/src/java/org/apache/hcatalog/data/transfer/EntityBase.java b/src/java/org/apache/hcatalog/data/transfer/EntityBase.java
index bbca1f8..668057f 100644
--- a/src/java/org/apache/hcatalog/data/transfer/EntityBase.java
+++ b/src/java/org/apache/hcatalog/data/transfer/EntityBase.java
@@ -29,31 +29,31 @@
 
 abstract class EntityBase {
 
-  String region;
-  String tableName;
-  String dbName;
-  Map<String, String> partitionKVs;
+    String region;
+    String tableName;
+    String dbName;
+    Map<String, String> partitionKVs;
 
-  /**
-   * Common methods for {@link ReadEntity} and {@link WriteEntity}
-   */
+    /**
+     * Common methods for {@link ReadEntity} and {@link WriteEntity}
+     */
 
-  abstract static class Entity extends EntityBase {
+    abstract static class Entity extends EntityBase {
 
-    public String getRegion() {
-      return region;
+        public String getRegion() {
+            return region;
+        }
+
+        public String getTableName() {
+            return tableName;
+        }
+
+        public String getDbName() {
+            return dbName;
+        }
+
+        public Map<String, String> getPartitionKVs() {
+            return partitionKVs;
+        }
     }
-
-    public String getTableName() {
-      return tableName;
-    }
-
-    public String getDbName() {
-      return dbName;
-    }
-
-    public Map<String, String> getPartitionKVs() {
-      return partitionKVs;
-    }
-  }
 }
diff --git a/src/java/org/apache/hcatalog/data/transfer/HCatReader.java b/src/java/org/apache/hcatalog/data/transfer/HCatReader.java
index d771dcb..84465f9 100644
--- a/src/java/org/apache/hcatalog/data/transfer/HCatReader.java
+++ b/src/java/org/apache/hcatalog/data/transfer/HCatReader.java
@@ -34,68 +34,68 @@
 
 public abstract class HCatReader {
 
-  /**
-   * This should be called at master node to obtain {@link ReaderContext} which
-   * then should be serialized and sent to slave nodes.
-   * 
-   * @return {@link ReaderContext}
-   * @throws HCatException
-   */
-  public abstract ReaderContext prepareRead() throws HCatException;
+    /**
+     * This should be called at master node to obtain {@link ReaderContext} which
+     * then should be serialized and sent to slave nodes.
+     *
+     * @return {@link ReaderContext}
+     * @throws HCatException
+     */
+    public abstract ReaderContext prepareRead() throws HCatException;
 
-  /**
-   * This should be called at slave nodes to read {@link HCatRecord}s
-   * 
-   * @return {@link Iterator} of {@link HCatRecord}
-   * @throws HCatException
-   */
-  public abstract Iterator<HCatRecord> read() throws HCatException;
+    /**
+     * This should be called at slave nodes to read {@link HCatRecord}s
+     *
+     * @return {@link Iterator} of {@link HCatRecord}
+     * @throws HCatException
+     */
+    public abstract Iterator<HCatRecord> read() throws HCatException;
 
-  /**
-   * This constructor will be invoked by {@link DataTransferFactory} at master
-   * node. Don't use this constructor. Instead, use {@link DataTransferFactory}
-   * 
-   * @param re
-   * @param config
-   */
-  protected HCatReader(final ReadEntity re, final Map<String, String> config) {
-    this(config);
-    this.re = re;
-  }
-
-  /**
-   * This constructor will be invoked by {@link DataTransferFactory} at slave
-   * nodes. Don't use this constructor. Instead, use {@link DataTransferFactory}
-   * 
-   * @param config
-   * @param sp
-   */
-
-  protected HCatReader(final Configuration config, StateProvider sp) {
-    this.conf = config;
-    this.sp = sp;
-  }
-
-  protected ReadEntity re; // This will be null at slaves.
-  protected Configuration conf;
-  protected ReaderContext info;
-  protected StateProvider sp; // This will be null at master.
-
-  private HCatReader(final Map<String, String> config) {
-    Configuration conf = new Configuration();
-    if (null != config) {
-      for (Entry<String, String> kv : config.entrySet()) {
-        conf.set(kv.getKey(), kv.getValue());
-      }
+    /**
+     * This constructor will be invoked by {@link DataTransferFactory} at master
+     * node. Don't use this constructor. Instead, use {@link DataTransferFactory}
+     *
+     * @param re
+     * @param config
+     */
+    protected HCatReader(final ReadEntity re, final Map<String, String> config) {
+        this(config);
+        this.re = re;
     }
-    this.conf = conf;
-  }
 
-  public Configuration getConf() {
-    if (null == conf) {
-      throw new IllegalStateException(
-          "HCatReader is not constructed correctly.");
+    /**
+     * This constructor will be invoked by {@link DataTransferFactory} at slave
+     * nodes. Don't use this constructor. Instead, use {@link DataTransferFactory}
+     *
+     * @param config
+     * @param sp
+     */
+
+    protected HCatReader(final Configuration config, StateProvider sp) {
+        this.conf = config;
+        this.sp = sp;
     }
-    return conf;
-  }
+
+    protected ReadEntity re; // This will be null at slaves.
+    protected Configuration conf;
+    protected ReaderContext info;
+    protected StateProvider sp; // This will be null at master.
+
+    private HCatReader(final Map<String, String> config) {
+        Configuration conf = new Configuration();
+        if (null != config) {
+            for (Entry<String, String> kv : config.entrySet()) {
+                conf.set(kv.getKey(), kv.getValue());
+            }
+        }
+        this.conf = conf;
+    }
+
+    public Configuration getConf() {
+        if (null == conf) {
+            throw new IllegalStateException(
+                "HCatReader is not constructed correctly.");
+        }
+        return conf;
+    }
 }
diff --git a/src/java/org/apache/hcatalog/data/transfer/HCatWriter.java b/src/java/org/apache/hcatalog/data/transfer/HCatWriter.java
index 2e924bf..07c33c3 100644
--- a/src/java/org/apache/hcatalog/data/transfer/HCatWriter.java
+++ b/src/java/org/apache/hcatalog/data/transfer/HCatWriter.java
@@ -35,79 +35,79 @@
 
 public abstract class HCatWriter {
 
-  protected Configuration conf;
-  protected WriteEntity we; // This will be null at slave nodes.
-  protected WriterContext info;
-  protected StateProvider sp;
+    protected Configuration conf;
+    protected WriteEntity we; // This will be null at slave nodes.
+    protected WriterContext info;
+    protected StateProvider sp;
 
-  /**
-   * External system should invoke this method exactly once from a master node.
-   * 
-   * @return {@link WriterContext} This should be serialized and sent to slave
-   *         nodes to construct HCatWriter there.
-   * @throws HCatException
-   */
-  public abstract WriterContext prepareWrite() throws HCatException;
+    /**
+     * External system should invoke this method exactly once from a master node.
+     *
+     * @return {@link WriterContext} This should be serialized and sent to slave
+     *         nodes to construct HCatWriter there.
+     * @throws HCatException
+     */
+    public abstract WriterContext prepareWrite() throws HCatException;
 
-  /**
-   * This method should be used at slave needs to perform writes.
-   * 
-   * @param recordItr
-   *          {@link Iterator} records to be written into HCatalog.
-   * @throws {@link HCatException}
-   */
-  public abstract void write(final Iterator<HCatRecord> recordItr)
-      throws HCatException;
+    /**
+     * This method should be used at slave nodes to perform writes.
+     *
+     * @param recordItr
+     *          {@link Iterator} records to be written into HCatalog.
+     * @throws {@link HCatException}
+     */
+    public abstract void write(final Iterator<HCatRecord> recordItr)
+        throws HCatException;
 
-  /**
-   * This method should be called at master node. Primary purpose of this is to
-   * do metadata commit.
-   * 
-   * @throws {@link HCatException}
-   */
-  public abstract void commit(final WriterContext context) throws HCatException;
+    /**
+     * This method should be called at master node. Primary purpose of this is to
+     * do metadata commit.
+     *
+     * @throws {@link HCatException}
+     */
+    public abstract void commit(final WriterContext context) throws HCatException;
 
-  /**
-   * This method should be called at master node. Primary purpose of this is to
-   * do cleanups in case of failures.
-   * 
-   * @throws {@link HCatException} *
-   */
-  public abstract void abort(final WriterContext context) throws HCatException;
+    /**
+     * This method should be called at master node. Primary purpose of this is to
+     * do cleanups in case of failures.
+     *
+     * @throws {@link HCatException} *
+     */
+    public abstract void abort(final WriterContext context) throws HCatException;
 
-  /**
-   * This constructor will be used at master node
-   * 
-   * @param we
-   *          WriteEntity defines where in storage records should be written to.
-   * @param config
-   *          Any configuration which external system wants to communicate to
-   *          HCatalog for performing writes.
-   */
-  protected HCatWriter(final WriteEntity we, final Map<String, String> config) {
-    this(config);
-    this.we = we;
-  }
-
-  /**
-   * This constructor will be used at slave nodes.
-   * 
-   * @param config
-   */
-  protected HCatWriter(final Configuration config, final StateProvider sp) {
-    this.conf = config;
-    this.sp = sp;
-  }
-
-  private HCatWriter(final Map<String, String> config) {
-    Configuration conf = new Configuration();
-    if (config != null) {
-      // user is providing config, so it could be null.
-      for (Entry<String, String> kv : config.entrySet()) {
-        conf.set(kv.getKey(), kv.getValue());
-      }
+    /**
+     * This constructor will be used at master node
+     *
+     * @param we
+     *          WriteEntity defines where in storage records should be written to.
+     * @param config
+     *          Any configuration which external system wants to communicate to
+     *          HCatalog for performing writes.
+     */
+    protected HCatWriter(final WriteEntity we, final Map<String, String> config) {
+        this(config);
+        this.we = we;
     }
 
-    this.conf = conf;
-  }
+    /**
+     * This constructor will be used at slave nodes.
+     *
+     * @param config
+     */
+    protected HCatWriter(final Configuration config, final StateProvider sp) {
+        this.conf = config;
+        this.sp = sp;
+    }
+
+    private HCatWriter(final Map<String, String> config) {
+        Configuration conf = new Configuration();
+        if (config != null) {
+            // user is providing config, so it could be null.
+            for (Entry<String, String> kv : config.entrySet()) {
+                conf.set(kv.getKey(), kv.getValue());
+            }
+        }
+
+        this.conf = conf;
+    }
 }
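
A corresponding sketch of the write flow declared above, again restricted to the DataTransferFactory methods and the HCatWriter contract shown in this diff: prepareWrite() at the master, write() at a slave, commit() back at the master. The target table is a placeholder assumption and the record iterator is assumed to be supplied by the caller; illustrative only.

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

import org.apache.hcatalog.data.HCatRecord;
import org.apache.hcatalog.data.transfer.DataTransferFactory;
import org.apache.hcatalog.data.transfer.HCatWriter;
import org.apache.hcatalog.data.transfer.WriteEntity;
import org.apache.hcatalog.data.transfer.WriterContext;

// Illustrative sketch of the prepare/write/commit sequence. "mydb"/"mytable"
// are placeholders; no retry or abort handling is shown.
public class WriteFlowSketch {
    public static void writeAll(Iterator<HCatRecord> records) throws Exception {
        // Master node: describe the target table and prepare the write.
        WriteEntity entity = new WriteEntity.Builder()
            .withDatabase("mydb")
            .withTable("mytable")
            .build();
        Map<String, String> config = new HashMap<String, String>();
        HCatWriter master = DataTransferFactory.getHCatWriter(entity, config);
        WriterContext context = master.prepareWrite(); // serialize and ship to slaves

        // Slave node: write its share of the records.
        HCatWriter slave = DataTransferFactory.getHCatWriter(context);
        slave.write(records);

        // Master node: commit the metadata once all slaves have finished.
        master.commit(context);
    }
}
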
diff --git a/src/java/org/apache/hcatalog/data/transfer/ReadEntity.java b/src/java/org/apache/hcatalog/data/transfer/ReadEntity.java
index 0cf5225..6787b10 100644
--- a/src/java/org/apache/hcatalog/data/transfer/ReadEntity.java
+++ b/src/java/org/apache/hcatalog/data/transfer/ReadEntity.java
@@ -22,67 +22,67 @@
 
 public class ReadEntity extends EntityBase.Entity {
 
-  private String filterString;
-
-  /**
-   * Don't instantiate {@link ReadEntity} directly. Use,
-   * {@link ReadEntity.Builder} instead.
-   * 
-   */
-  private ReadEntity() {
-    // Not allowed
-  }
-
-  private ReadEntity(Builder builder) {
-
-    this.region = builder.region;
-    this.dbName = builder.dbName;
-    this.tableName = builder.tableName;
-    this.partitionKVs = builder.partitionKVs;
-    this.filterString = builder.filterString;
-  }
-
-  public String getFilterString() {
-    return this.filterString;
-  }
-
-  /**
-   * This class should be used to build {@link ReadEntity}. It follows builder
-   * pattern, letting you build your {@link ReadEntity} with whatever level of
-   * detail you want.
-   * 
-   */
-  public static class Builder extends EntityBase {
-
     private String filterString;
 
-    public Builder withRegion(final String region) {
-      this.region = region;
-      return this;
+    /**
+     * Don't instantiate {@link ReadEntity} directly. Use
+     * {@link ReadEntity.Builder} instead.
+     *
+     */
+    private ReadEntity() {
+        // Not allowed
     }
 
-    public Builder withDatabase(final String dbName) {
-      this.dbName = dbName;
-      return this;
+    private ReadEntity(Builder builder) {
+
+        this.region = builder.region;
+        this.dbName = builder.dbName;
+        this.tableName = builder.tableName;
+        this.partitionKVs = builder.partitionKVs;
+        this.filterString = builder.filterString;
     }
 
-    public Builder withTable(final String tblName) {
-      this.tableName = tblName;
-      return this;
+    public String getFilterString() {
+        return this.filterString;
     }
 
-    public Builder withPartition(final Map<String, String> partKVs) {
-      this.partitionKVs = partKVs;
-      return this;
-    }
+    /**
+     * This class should be used to build {@link ReadEntity}. It follows builder
+     * pattern, letting you build your {@link ReadEntity} with whatever level of
+     * detail you want.
+     *
+     */
+    public static class Builder extends EntityBase {
 
-    public Builder withFilter(String filterString) {
-      this.filterString = filterString;
-      return this;
-    }
+        private String filterString;
 
-    public ReadEntity build() {
-      return new ReadEntity(this);
+        public Builder withRegion(final String region) {
+            this.region = region;
+            return this;
+        }
+
+        public Builder withDatabase(final String dbName) {
+            this.dbName = dbName;
+            return this;
+        }
+
+        public Builder withTable(final String tblName) {
+            this.tableName = tblName;
+            return this;
+        }
+
+        public Builder withPartition(final Map<String, String> partKVs) {
+            this.partitionKVs = partKVs;
+            return this;
+        }
+
+        public Builder withFilter(String filterString) {
+            this.filterString = filterString;
+            return this;
+        }
+
+        public ReadEntity build() {
+            return new ReadEntity(this);
+        }
     }
-  }
 }
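
The Builder above also accepts partition and filter predicates; a small fragment showing that (same imports as the read-flow sketch earlier, and the filter expression is a placeholder assumption):

// Fragment: restricting a read with the optional Builder methods above.
ReadEntity filtered = new ReadEntity.Builder()
    .withDatabase("mydb")              // placeholder
    .withTable("mytable")              // placeholder
    .withFilter("ds=\"20120901\"")     // placeholder partition filter expression
    .build();
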
diff --git a/src/java/org/apache/hcatalog/data/transfer/ReaderContext.java b/src/java/org/apache/hcatalog/data/transfer/ReaderContext.java
index da6d1a3..68d653a 100644
--- a/src/java/org/apache/hcatalog/data/transfer/ReaderContext.java
+++ b/src/java/org/apache/hcatalog/data/transfer/ReaderContext.java
@@ -38,51 +38,51 @@
  */
 public class ReaderContext implements Externalizable, Configurable {
 
-  private static final long serialVersionUID = -2656468331739574367L;
-  private List<InputSplit> splits;
-  private Configuration conf;
+    private static final long serialVersionUID = -2656468331739574367L;
+    private List<InputSplit> splits;
+    private Configuration conf;
 
-  public ReaderContext() {
-    this.splits = new ArrayList<InputSplit>();
-    this.conf = new Configuration();
-  }
-
-  public void setInputSplits(final List<InputSplit> splits) {
-    this.splits = splits;
-  }
-
-  public List<InputSplit> getSplits() {
-    return splits;
-  }
-
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
-
-  @Override
-  public void setConf(final Configuration config) {
-    conf = config;
-  }
-
-  @Override
-  public void writeExternal(ObjectOutput out) throws IOException {
-    conf.write(out);
-    out.writeInt(splits.size());
-    for (InputSplit split : splits) {
-      ((HCatSplit) split).write(out);
+    public ReaderContext() {
+        this.splits = new ArrayList<InputSplit>();
+        this.conf = new Configuration();
     }
-  }
 
-  @Override
-  public void readExternal(ObjectInput in) throws IOException,
-      ClassNotFoundException {
-    conf.readFields(in);
-    int numOfSplits = in.readInt();
-    for (int i = 0; i < numOfSplits; i++) {
-      HCatSplit split = new HCatSplit();
-      split.readFields(in);
-      splits.add(split);
+    public void setInputSplits(final List<InputSplit> splits) {
+        this.splits = splits;
     }
-  }
+
+    public List<InputSplit> getSplits() {
+        return splits;
+    }
+
+    @Override
+    public Configuration getConf() {
+        return conf;
+    }
+
+    @Override
+    public void setConf(final Configuration config) {
+        conf = config;
+    }
+
+    @Override
+    public void writeExternal(ObjectOutput out) throws IOException {
+        conf.write(out);
+        out.writeInt(splits.size());
+        for (InputSplit split : splits) {
+            ((HCatSplit) split).write(out);
+        }
+    }
+
+    @Override
+    public void readExternal(ObjectInput in) throws IOException,
+        ClassNotFoundException {
+        conf.readFields(in);
+        int numOfSplits = in.readInt();
+        for (int i = 0; i < numOfSplits; i++) {
+            HCatSplit split = new HCatSplit();
+            split.readFields(in);
+            splits.add(split);
+        }
+    }
 }
diff --git a/src/java/org/apache/hcatalog/data/transfer/WriteEntity.java b/src/java/org/apache/hcatalog/data/transfer/WriteEntity.java
index 4962bd9..3917a18 100644
--- a/src/java/org/apache/hcatalog/data/transfer/WriteEntity.java
+++ b/src/java/org/apache/hcatalog/data/transfer/WriteEntity.java
@@ -22,53 +22,53 @@
 
 public class WriteEntity extends EntityBase.Entity {
 
-  /**
-   * Don't instantiate {@link WriteEntity} directly. Use, {@link Builder} to
-   * build {@link WriteEntity}.
-   */
+    /**
+     * Don't instantiate {@link WriteEntity} directly. Use {@link Builder} to
+     * build {@link WriteEntity}.
+     */
 
-  private WriteEntity() {
-    // Not allowed.
-  }
-
-  private WriteEntity(Builder builder) {
-    this.region = builder.region;
-    this.dbName = builder.dbName;
-    this.tableName = builder.tableName;
-    this.partitionKVs = builder.partitionKVs;
-  }
-
-  /**
-   * This class should be used to build {@link WriteEntity}. It follows builder
-   * pattern, letting you build your {@link WriteEntity} with whatever level of
-   * detail you want.
-   * 
-   */
-  public static class Builder extends EntityBase {
-
-    public Builder withRegion(final String region) {
-      this.region = region;
-      return this;
+    private WriteEntity() {
+        // Not allowed.
     }
 
-    public Builder withDatabase(final String dbName) {
-      this.dbName = dbName;
-      return this;
+    private WriteEntity(Builder builder) {
+        this.region = builder.region;
+        this.dbName = builder.dbName;
+        this.tableName = builder.tableName;
+        this.partitionKVs = builder.partitionKVs;
     }
 
-    public Builder withTable(final String tblName) {
-      this.tableName = tblName;
-      return this;
-    }
+    /**
+     * This class should be used to build {@link WriteEntity}. It follows builder
+     * pattern, letting you build your {@link WriteEntity} with whatever level of
+     * detail you want.
+     *
+     */
+    public static class Builder extends EntityBase {
 
-    public Builder withPartition(final Map<String, String> partKVs) {
-      this.partitionKVs = partKVs;
-      return this;
-    }
+        public Builder withRegion(final String region) {
+            this.region = region;
+            return this;
+        }
 
-    public WriteEntity build() {
-      return new WriteEntity(this);
-    }
+        public Builder withDatabase(final String dbName) {
+            this.dbName = dbName;
+            return this;
+        }
 
-  }
+        public Builder withTable(final String tblName) {
+            this.tableName = tblName;
+            return this;
+        }
+
+        public Builder withPartition(final Map<String, String> partKVs) {
+            this.partitionKVs = partKVs;
+            return this;
+        }
+
+        public WriteEntity build() {
+            return new WriteEntity(this);
+        }
+
+    }
 }
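
Similarly for the write side, a fragment aiming WriteEntity at a single partition (partition key and value are placeholder assumptions; imports as in the write-flow sketch above):

// Fragment: targeting one partition with the Builder methods above.
Map<String, String> partitionSpec = new HashMap<String, String>();
partitionSpec.put("ds", "20120901");   // placeholder partition key/value
WriteEntity partitioned = new WriteEntity.Builder()
    .withDatabase("mydb")              // placeholder
    .withTable("mytable")              // placeholder
    .withPartition(partitionSpec)
    .build();
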
diff --git a/src/java/org/apache/hcatalog/data/transfer/WriterContext.java b/src/java/org/apache/hcatalog/data/transfer/WriterContext.java
index 6e67cfd..002ca07 100644
--- a/src/java/org/apache/hcatalog/data/transfer/WriterContext.java
+++ b/src/java/org/apache/hcatalog/data/transfer/WriterContext.java
@@ -34,31 +34,31 @@
  */
 public class WriterContext implements Externalizable, Configurable {
 
-  private static final long serialVersionUID = -5899374262971611840L;
-  private Configuration conf;
+    private static final long serialVersionUID = -5899374262971611840L;
+    private Configuration conf;
 
-  public WriterContext() {
-    conf = new Configuration();
-  }
+    public WriterContext() {
+        conf = new Configuration();
+    }
 
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
+    @Override
+    public Configuration getConf() {
+        return conf;
+    }
 
-  @Override
-  public void setConf(final Configuration config) {
-    this.conf = config;
-  }
+    @Override
+    public void setConf(final Configuration config) {
+        this.conf = config;
+    }
 
-  @Override
-  public void writeExternal(ObjectOutput out) throws IOException {
-    conf.write(out);
-  }
+    @Override
+    public void writeExternal(ObjectOutput out) throws IOException {
+        conf.write(out);
+    }
 
-  @Override
-  public void readExternal(ObjectInput in) throws IOException,
-      ClassNotFoundException {
-    conf.readFields(in);
-  }
+    @Override
+    public void readExternal(ObjectInput in) throws IOException,
+        ClassNotFoundException {
+        conf.readFields(in);
+    }
 }
diff --git a/src/java/org/apache/hcatalog/data/transfer/impl/HCatInputFormatReader.java b/src/java/org/apache/hcatalog/data/transfer/impl/HCatInputFormatReader.java
index 663a6a6..cf9bba2 100644
--- a/src/java/org/apache/hcatalog/data/transfer/impl/HCatInputFormatReader.java
+++ b/src/java/org/apache/hcatalog/data/transfer/impl/HCatInputFormatReader.java
@@ -42,99 +42,99 @@
 
 /**
  * This reader reads via {@link HCatInputFormat}
- * 
+ *
  */
 public class HCatInputFormatReader extends HCatReader {
 
-  private InputSplit split;
+    private InputSplit split;
 
-  public HCatInputFormatReader(InputSplit split, Configuration config,
-      StateProvider sp) {
-    super(config, sp);
-    this.split = split;
-  }
-
-  public HCatInputFormatReader(ReadEntity info, Map<String, String> config) {
-    super(info, config);
-  }
-
-  @Override
-  public ReaderContext prepareRead() throws HCatException {
-
-    try {
-      Job job = new Job(conf);
-      InputJobInfo jobInfo = InputJobInfo.create(re.getDbName(),
-          re.getTableName(), re.getFilterString());
-      HCatInputFormat.setInput(job, jobInfo);
-      HCatInputFormat hcif = new HCatInputFormat();
-      ReaderContext cntxt = new ReaderContext();
-      cntxt.setInputSplits(hcif.getSplits(
-                  HCatHadoopShims.Instance.get().createJobContext(job.getConfiguration(), null)));
-      cntxt.setConf(job.getConfiguration());
-      return cntxt;
-    } catch (IOException e) {
-      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
-    } catch (InterruptedException e) {
-      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+    public HCatInputFormatReader(InputSplit split, Configuration config,
+                                 StateProvider sp) {
+        super(config, sp);
+        this.split = split;
     }
-  }
 
-  @Override
-  public Iterator<HCatRecord> read() throws HCatException {
-
-    HCatInputFormat inpFmt = new HCatInputFormat();
-    RecordReader<WritableComparable, HCatRecord> rr;
-    try {
-      TaskAttemptContext cntxt = HCatHadoopShims.Instance.get().createTaskAttemptContext(conf, new TaskAttemptID());
-      rr = inpFmt.createRecordReader(split, cntxt);
-      rr.initialize(split, cntxt);
-    } catch (IOException e) {
-      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
-    } catch (InterruptedException e) {
-      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
-    }
-    return new HCatRecordItr(rr);
-  }
-
-  private static class HCatRecordItr implements Iterator<HCatRecord> {
-
-    private RecordReader<WritableComparable, HCatRecord> curRecReader;
-
-    HCatRecordItr(RecordReader<WritableComparable, HCatRecord> rr) {
-      curRecReader = rr;
+    public HCatInputFormatReader(ReadEntity info, Map<String, String> config) {
+        super(info, config);
     }
 
     @Override
-    public boolean hasNext() {
-      try {
-        boolean retVal = curRecReader.nextKeyValue();
-        if (retVal) {
-          return true;
+    public ReaderContext prepareRead() throws HCatException {
+
+        try {
+            Job job = new Job(conf);
+            InputJobInfo jobInfo = InputJobInfo.create(re.getDbName(),
+                re.getTableName(), re.getFilterString());
+            HCatInputFormat.setInput(job, jobInfo);
+            HCatInputFormat hcif = new HCatInputFormat();
+            ReaderContext cntxt = new ReaderContext();
+            cntxt.setInputSplits(hcif.getSplits(
+                HCatHadoopShims.Instance.get().createJobContext(job.getConfiguration(), null)));
+            cntxt.setConf(job.getConfiguration());
+            return cntxt;
+        } catch (IOException e) {
+            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+        } catch (InterruptedException e) {
+            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
         }
-        // if its false, we need to close recordReader.
-        curRecReader.close();
-        return false;
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      } catch (InterruptedException e) {
-        throw new RuntimeException(e);
-      }
     }
 
     @Override
-    public HCatRecord next() {
-      try {
-        return curRecReader.getCurrentValue();
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      } catch (InterruptedException e) {
-        throw new RuntimeException(e);
-      }
+    public Iterator<HCatRecord> read() throws HCatException {
+
+        HCatInputFormat inpFmt = new HCatInputFormat();
+        RecordReader<WritableComparable, HCatRecord> rr;
+        try {
+            TaskAttemptContext cntxt = HCatHadoopShims.Instance.get().createTaskAttemptContext(conf, new TaskAttemptID());
+            rr = inpFmt.createRecordReader(split, cntxt);
+            rr.initialize(split, cntxt);
+        } catch (IOException e) {
+            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+        } catch (InterruptedException e) {
+            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+        }
+        return new HCatRecordItr(rr);
     }
 
-    @Override
-    public void remove() {
-      throw new UnsupportedOperationException("Not allowed");
+    private static class HCatRecordItr implements Iterator<HCatRecord> {
+
+        private RecordReader<WritableComparable, HCatRecord> curRecReader;
+
+        HCatRecordItr(RecordReader<WritableComparable, HCatRecord> rr) {
+            curRecReader = rr;
+        }
+
+        @Override
+        public boolean hasNext() {
+            try {
+                boolean retVal = curRecReader.nextKeyValue();
+                if (retVal) {
+                    return true;
+                }
+                // if its false, we need to close recordReader.
+                curRecReader.close();
+                return false;
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            } catch (InterruptedException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        @Override
+        public HCatRecord next() {
+            try {
+                return curRecReader.getCurrentValue();
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            } catch (InterruptedException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        @Override
+        public void remove() {
+            throw new UnsupportedOperationException("Not allowed");
+        }
     }
-  }
 }
diff --git a/src/java/org/apache/hcatalog/data/transfer/impl/HCatOutputFormatWriter.java b/src/java/org/apache/hcatalog/data/transfer/impl/HCatOutputFormatWriter.java
index e3489fa..03e59e8 100644
--- a/src/java/org/apache/hcatalog/data/transfer/impl/HCatOutputFormatWriter.java
+++ b/src/java/org/apache/hcatalog/data/transfer/impl/HCatOutputFormatWriter.java
@@ -43,118 +43,118 @@
 
 /**
  * This writer writes via {@link HCatOutputFormat}
- * 
+ *
  */
 public class HCatOutputFormatWriter extends HCatWriter {
 
-  public HCatOutputFormatWriter(WriteEntity we, Map<String, String> config) {
-    super(we, config);
-  }
-
-  public HCatOutputFormatWriter(Configuration config, StateProvider sp) {
-    super(config, sp);
-  }
-
-  @Override
-  public WriterContext prepareWrite() throws HCatException {
-    OutputJobInfo jobInfo = OutputJobInfo.create(we.getDbName(),
-        we.getTableName(), we.getPartitionKVs());
-    Job job;
-    try {
-      job = new Job(conf);
-      HCatOutputFormat.setOutput(job, jobInfo);
-      HCatOutputFormat.setSchema(job, HCatOutputFormat.getTableSchema(job));
-      HCatOutputFormat outFormat = new HCatOutputFormat();
-      outFormat.checkOutputSpecs(job);
-      outFormat.getOutputCommitter(HCatHadoopShims.Instance.get().createTaskAttemptContext
-              (job.getConfiguration(), HCatHadoopShims.Instance.get().createTaskAttemptID())).setupJob(job);
-    } catch (IOException e) {
-      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
-    } catch (InterruptedException e) {
-      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+    public HCatOutputFormatWriter(WriteEntity we, Map<String, String> config) {
+        super(we, config);
     }
-    WriterContext cntxt = new WriterContext();
-    cntxt.setConf(job.getConfiguration());
-    return cntxt;
-  }
 
-  @Override
-  public void write(Iterator<HCatRecord> recordItr) throws HCatException {
+    public HCatOutputFormatWriter(Configuration config, StateProvider sp) {
+        super(config, sp);
+    }
 
-    int id = sp.getId();
-    setVarsInConf(id);
-    HCatOutputFormat outFormat = new HCatOutputFormat();
-    TaskAttemptContext cntxt = HCatHadoopShims.Instance.get().createTaskAttemptContext
-        (conf, new TaskAttemptID(HCatHadoopShims.Instance.get().createTaskID(), id));
-    OutputCommitter committer = null;
-    RecordWriter<WritableComparable<?>, HCatRecord> writer;
-    try {
-      committer = outFormat.getOutputCommitter(cntxt);
-      committer.setupTask(cntxt);
-      writer = outFormat.getRecordWriter(cntxt);
-      while (recordItr.hasNext()) {
-        HCatRecord rec = recordItr.next();
-        writer.write(null, rec);
-      }
-      writer.close(cntxt);
-      if (committer.needsTaskCommit(cntxt)) {
-        committer.commitTask(cntxt);
-      }
-    } catch (IOException e) {
-      if (null != committer) {
+    @Override
+    public WriterContext prepareWrite() throws HCatException {
+        OutputJobInfo jobInfo = OutputJobInfo.create(we.getDbName(),
+            we.getTableName(), we.getPartitionKVs());
+        Job job;
         try {
-          committer.abortTask(cntxt);
-        } catch (IOException e1) {
-          throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
+            job = new Job(conf);
+            HCatOutputFormat.setOutput(job, jobInfo);
+            HCatOutputFormat.setSchema(job, HCatOutputFormat.getTableSchema(job));
+            HCatOutputFormat outFormat = new HCatOutputFormat();
+            outFormat.checkOutputSpecs(job);
+            outFormat.getOutputCommitter(HCatHadoopShims.Instance.get().createTaskAttemptContext
+                (job.getConfiguration(), HCatHadoopShims.Instance.get().createTaskAttemptID())).setupJob(job);
+        } catch (IOException e) {
+            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+        } catch (InterruptedException e) {
+            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
         }
-      }
-      throw new HCatException("Failed while writing", e);
-    } catch (InterruptedException e) {
-      if (null != committer) {
+        WriterContext cntxt = new WriterContext();
+        cntxt.setConf(job.getConfiguration());
+        return cntxt;
+    }
+
+    @Override
+    public void write(Iterator<HCatRecord> recordItr) throws HCatException {
+
+        int id = sp.getId();
+        setVarsInConf(id);
+        HCatOutputFormat outFormat = new HCatOutputFormat();
+        TaskAttemptContext cntxt = HCatHadoopShims.Instance.get().createTaskAttemptContext
+            (conf, new TaskAttemptID(HCatHadoopShims.Instance.get().createTaskID(), id));
+        OutputCommitter committer = null;
+        RecordWriter<WritableComparable<?>, HCatRecord> writer;
         try {
-          committer.abortTask(cntxt);
-        } catch (IOException e1) {
-          throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
+            committer = outFormat.getOutputCommitter(cntxt);
+            committer.setupTask(cntxt);
+            writer = outFormat.getRecordWriter(cntxt);
+            while (recordItr.hasNext()) {
+                HCatRecord rec = recordItr.next();
+                writer.write(null, rec);
+            }
+            writer.close(cntxt);
+            if (committer.needsTaskCommit(cntxt)) {
+                committer.commitTask(cntxt);
+            }
+        } catch (IOException e) {
+            if (null != committer) {
+                try {
+                    committer.abortTask(cntxt);
+                } catch (IOException e1) {
+                    throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
+                }
+            }
+            throw new HCatException("Failed while writing", e);
+        } catch (InterruptedException e) {
+            if (null != committer) {
+                try {
+                    committer.abortTask(cntxt);
+                } catch (IOException e1) {
+                    throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
+                }
+            }
+            throw new HCatException("Failed while writing", e);
         }
-      }
-      throw new HCatException("Failed while writing", e);
     }
-  }
 
-  @Override
-  public void commit(WriterContext context) throws HCatException {
-    try {
-      new HCatOutputFormat().getOutputCommitter(HCatHadoopShims.Instance.get().createTaskAttemptContext
-              (context.getConf(), HCatHadoopShims.Instance.get().createTaskAttemptID()))
-          .commitJob(HCatHadoopShims.Instance.get().createJobContext(context.getConf(), null));
-    } catch (IOException e) {
-      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
-    } catch (InterruptedException e) {
-      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+    @Override
+    public void commit(WriterContext context) throws HCatException {
+        try {
+            new HCatOutputFormat().getOutputCommitter(HCatHadoopShims.Instance.get().createTaskAttemptContext
+                (context.getConf(), HCatHadoopShims.Instance.get().createTaskAttemptID()))
+                .commitJob(HCatHadoopShims.Instance.get().createJobContext(context.getConf(), null));
+        } catch (IOException e) {
+            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+        } catch (InterruptedException e) {
+            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+        }
     }
-  }
 
-  @Override
-  public void abort(WriterContext context) throws HCatException {
-    try {
-      new HCatOutputFormat().getOutputCommitter(HCatHadoopShims.Instance.get().createTaskAttemptContext
-              (context.getConf(), HCatHadoopShims.Instance.get().createTaskAttemptID()))
-          .abortJob(HCatHadoopShims.Instance.get().createJobContext(context.getConf(), null),State.FAILED);
-    } catch (IOException e) {
-      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
-    } catch (InterruptedException e) {
-      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+    @Override
+    public void abort(WriterContext context) throws HCatException {
+        try {
+            new HCatOutputFormat().getOutputCommitter(HCatHadoopShims.Instance.get().createTaskAttemptContext
+                (context.getConf(), HCatHadoopShims.Instance.get().createTaskAttemptID()))
+                .abortJob(HCatHadoopShims.Instance.get().createJobContext(context.getConf(), null), State.FAILED);
+        } catch (IOException e) {
+            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+        } catch (InterruptedException e) {
+            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
+        }
     }
-  }
 
-  private void setVarsInConf(int id) {
+    private void setVarsInConf(int id) {
 
-    // Following two config keys are required by FileOutputFormat to work
-    // correctly.
-    // In usual case of Hadoop, JobTracker will set these before launching
-    // tasks.
-    // Since there is no jobtracker here, we set it ourself.
-    conf.setInt("mapred.task.partition", id);
-    conf.set("mapred.task.id", "attempt__0000_r_000000_" + id);
-  }
+        // The following two config keys are required for FileOutputFormat to work
+        // correctly. In the usual Hadoop case the JobTracker sets them before
+        // launching tasks; since there is no JobTracker here, we set them ourselves.
+        conf.setInt("mapred.task.partition", id);
+        conf.set("mapred.task.id", "attempt__0000_r_000000_" + id);
+    }
 }
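
For orientation while reading the re-indented writer above: prepareWrite() runs once on the master and yields a WriterContext, write() streams records on each slave, and commit()/abort() finalize the job. The sketch below strings those calls together in a single process purely as an illustration; it assumes the caller supplies a WriteEntity and an Iterator<HCatRecord>, constructs the writer directly with the constructors shown in this patch (a factory entry point, if any, is not shown here), and infers the package names for WriteEntity and WriterContext from the surrounding layout.

import java.util.Iterator;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hcatalog.common.HCatException;
import org.apache.hcatalog.data.HCatRecord;
import org.apache.hcatalog.data.transfer.WriteEntity;
import org.apache.hcatalog.data.transfer.WriterContext;
import org.apache.hcatalog.data.transfer.impl.HCatOutputFormatWriter;
import org.apache.hcatalog.data.transfer.state.DefaultStateProvider;

public class WriterFlowSketch {
    // we, config and records are assumed to be provided by the caller.
    public static void run(WriteEntity we, Map<String, String> config,
                           Iterator<HCatRecord> records) throws HCatException {
        HCatOutputFormatWriter master = new HCatOutputFormatWriter(we, config);
        WriterContext context = master.prepareWrite();       // master side: set up the output job
        try {
            // Slave side: normally runs in another process, rebuilt from the shipped context.
            Configuration conf = context.getConf();
            HCatOutputFormatWriter slave =
                new HCatOutputFormatWriter(conf, DefaultStateProvider.get());
            slave.write(records);                             // write and commit this task's output
            master.commit(context);                           // master side: commit the whole job
        } catch (HCatException e) {
            master.abort(context);                            // roll back the job on failure
            throw e;
        }
    }
}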
diff --git a/src/java/org/apache/hcatalog/data/transfer/state/DefaultStateProvider.java b/src/java/org/apache/hcatalog/data/transfer/state/DefaultStateProvider.java
index 339244d..e7455a3 100644
--- a/src/java/org/apache/hcatalog/data/transfer/state/DefaultStateProvider.java
+++ b/src/java/org/apache/hcatalog/data/transfer/state/DefaultStateProvider.java
@@ -23,25 +23,25 @@
 
 public class DefaultStateProvider implements StateProvider {
 
-  /**
-   * Default implementation. Here, ids are generated randomly.
-   */
-  @Override
-  public int getId() {
+    /**
+     * Default implementation. Here, ids are generated randomly.
+     */
+    @Override
+    public int getId() {
 
-    NumberFormat numberFormat = NumberFormat.getInstance();
-    numberFormat.setMinimumIntegerDigits(5);
-    numberFormat.setGroupingUsed(false);
-    return Integer
-        .parseInt(numberFormat.format(Math.abs(new Random().nextInt())));
-  }
-
-  private static StateProvider sp;
-
-  public static synchronized StateProvider get() {
-    if (null == sp) {
-      sp = new DefaultStateProvider();
+        NumberFormat numberFormat = NumberFormat.getInstance();
+        numberFormat.setMinimumIntegerDigits(5);
+        numberFormat.setGroupingUsed(false);
+        return Integer
+            .parseInt(numberFormat.format(Math.abs(new Random().nextInt())));
     }
-    return sp;
-  }
+
+    private static StateProvider sp;
+
+    public static synchronized StateProvider get() {
+        if (null == sp) {
+            sp = new DefaultStateProvider();
+        }
+        return sp;
+    }
 }
diff --git a/src/java/org/apache/hcatalog/data/transfer/state/StateProvider.java b/src/java/org/apache/hcatalog/data/transfer/state/StateProvider.java
index 2ab6251..ddf7364 100644
--- a/src/java/org/apache/hcatalog/data/transfer/state/StateProvider.java
+++ b/src/java/org/apache/hcatalog/data/transfer/state/StateProvider.java
@@ -25,10 +25,10 @@
  */
 public interface StateProvider {
 
-  /**
-   * This method should return id assigned to slave node.
-   * 
-   * @return id
-   */
-  public int getId();
+    /**
+     * This method should return the id assigned to the slave node.
+     *
+     * @return id
+     */
+    public int getId();
 }
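
The StateProvider contract above only asks for a numeric id per slave; DefaultStateProvider fills it with a random five-digit number, which works for ad-hoc use but is not reproducible. As a hedged illustration of the interface, here is a hypothetical provider that hands out sequential ids within one JVM instead; the class name and the AtomicInteger are illustrative choices, not part of this patch.

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hcatalog.data.transfer.state.StateProvider;

// Hypothetical alternative: sequential ids, one per call, within a single JVM.
public class SequentialStateProvider implements StateProvider {
    private static final AtomicInteger NEXT = new AtomicInteger(0);

    @Override
    public int getId() {
        return NEXT.getAndIncrement();
    }
}

A writer could then be constructed as new HCatOutputFormatWriter(conf, new SequentialStateProvider()) in place of DefaultStateProvider.get().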
diff --git a/src/java/org/apache/hcatalog/har/HarOutputCommitterPostProcessor.java b/src/java/org/apache/hcatalog/har/HarOutputCommitterPostProcessor.java
index 41a81b3..7e4dba4 100644
--- a/src/java/org/apache/hcatalog/har/HarOutputCommitterPostProcessor.java
+++ b/src/java/org/apache/hcatalog/har/HarOutputCommitterPostProcessor.java
@@ -32,93 +32,93 @@
 import org.apache.hcatalog.common.HCatException;
 
 public class HarOutputCommitterPostProcessor {
-  
-  boolean isEnabled = false;
-  
-  public boolean isEnabled() {
-    return isEnabled;
-  }
 
-  public void setEnabled(boolean enabled) {
-    this.isEnabled = enabled;
-  }
+    boolean isEnabled = false;
+
+    public boolean isEnabled() {
+        return isEnabled;
+    }
+
+    public void setEnabled(boolean enabled) {
+        this.isEnabled = enabled;
+    }
 
 
-  public void exec(JobContext context, Partition partition, Path partPath) throws IOException {
+    public void exec(JobContext context, Partition partition, Path partPath) throws IOException {
 //    LOG.info("Archiving partition ["+partPath.toString()+"]");
-    makeHar(context,partPath.toUri().toString(),harFile(partPath));
-    partition.getParameters().put(Constants.IS_ARCHIVED, "true");
-  }
-  
-  public String harFile(Path ptnPath) throws IOException{
-    String harFile = ptnPath.toString().replaceFirst("/+$", "") + ".har";
+        makeHar(context, partPath.toUri().toString(), harFile(partPath));
+        partition.getParameters().put(Constants.IS_ARCHIVED, "true");
+    }
+
+    public String harFile(Path ptnPath) throws IOException {
+        String harFile = ptnPath.toString().replaceFirst("/+$", "") + ".har";
 //    LOG.info("har file : " + harFile);
-    return harFile;
-  }
+        return harFile;
+    }
 
-  public String getParentFSPath(Path ptnPath) throws IOException {
-    return ptnPath.toUri().getPath().replaceFirst("/+$", "");
-  }
+    public String getParentFSPath(Path ptnPath) throws IOException {
+        return ptnPath.toUri().getPath().replaceFirst("/+$", "");
+    }
 
-  public String getProcessedLocation(Path ptnPath) throws IOException {
-    String harLocn = ("har://" + ptnPath.toUri().getPath()).replaceFirst("/+$", "") + ".har" + Path.SEPARATOR;
+    public String getProcessedLocation(Path ptnPath) throws IOException {
+        String harLocn = ("har://" + ptnPath.toUri().getPath()).replaceFirst("/+$", "") + ".har" + Path.SEPARATOR;
 //    LOG.info("har location : " + harLocn);
-    return harLocn;
-  }
-  
+        return harLocn;
+    }
 
-  /**
-   * Creates a har file from the contents of a given directory, using that as root.
-   * @param dir Directory to archive
-   * @param harFile The HAR file to create
-   */
-  public static void makeHar(JobContext context, String dir, String harFile) throws IOException{
+
+    /**
+     * Creates a har file from the contents of a given directory, using that as root.
+     * @param dir Directory to archive
+     * @param harFile The HAR file to create
+     */
+    public static void makeHar(JobContext context, String dir, String harFile) throws IOException {
 //    Configuration conf = context.getConfiguration();
 //    Credentials creds = context.getCredentials();
-    
+
 //    HCatUtil.logAllTokens(LOG,context);
-    
-    int lastSep = harFile.lastIndexOf(Path.SEPARATOR_CHAR);
-    Path archivePath = new Path(harFile.substring(0,lastSep));
-    final String[] args = {
-        "-archiveName",
-        harFile.substring(lastSep+1, harFile.length()),
-        "-p",
-        dir,
-        "*",
-        archivePath.toString()
-    };
+
+        int lastSep = harFile.lastIndexOf(Path.SEPARATOR_CHAR);
+        Path archivePath = new Path(harFile.substring(0, lastSep));
+        final String[] args = {
+            "-archiveName",
+            harFile.substring(lastSep + 1, harFile.length()),
+            "-p",
+            dir,
+            "*",
+            archivePath.toString()
+        };
 //    for (String arg : args){
 //      LOG.info("Args to har : "+ arg);
 //    }
-    try {
-      Configuration newConf = new Configuration();
-      FileSystem fs = archivePath.getFileSystem(newConf);
-      
-      String hadoopTokenFileLocationEnvSetting = System.getenv(HCatConstants.SYSENV_HADOOP_TOKEN_FILE_LOCATION);
-      if ((hadoopTokenFileLocationEnvSetting != null) && (!hadoopTokenFileLocationEnvSetting.isEmpty())){
-        newConf.set(HCatConstants.CONF_MAPREDUCE_JOB_CREDENTIALS_BINARY, hadoopTokenFileLocationEnvSetting);
+        try {
+            Configuration newConf = new Configuration();
+            FileSystem fs = archivePath.getFileSystem(newConf);
+
+            String hadoopTokenFileLocationEnvSetting = System.getenv(HCatConstants.SYSENV_HADOOP_TOKEN_FILE_LOCATION);
+            if ((hadoopTokenFileLocationEnvSetting != null) && (!hadoopTokenFileLocationEnvSetting.isEmpty())) {
+                newConf.set(HCatConstants.CONF_MAPREDUCE_JOB_CREDENTIALS_BINARY, hadoopTokenFileLocationEnvSetting);
 //      LOG.info("System.getenv(\"HADOOP_TOKEN_FILE_LOCATION\") =["+  System.getenv("HADOOP_TOKEN_FILE_LOCATION")+"]");
-      }
+            }
 //      for (FileStatus ds : fs.globStatus(new Path(dir, "*"))){
 //        LOG.info("src : "+ds.getPath().toUri().toString());
 //      }
 
-      final HadoopArchives har = new HadoopArchives(newConf);
-      int rc = ToolRunner.run(har, args);
-      if (rc!= 0){
-        throw new Exception("Har returned error code "+rc);
-      }
+            final HadoopArchives har = new HadoopArchives(newConf);
+            int rc = ToolRunner.run(har, args);
+            if (rc != 0) {
+                throw new Exception("Har returned error code " + rc);
+            }
 
 //      for (FileStatus hs : fs.globStatus(new Path(harFile, "*"))){
 //        LOG.info("dest : "+hs.getPath().toUri().toString());
 //      }
 //      doHarCheck(fs,harFile);
 //      LOG.info("Nuking " + dir);
-      fs.delete(new Path(dir), true);
-    } catch (Exception e){
-      throw new HCatException("Error creating Har ["+harFile+"] from ["+dir+"]", e);
+            fs.delete(new Path(dir), true);
+        } catch (Exception e) {
+            throw new HCatException("Error creating Har [" + harFile + "] from [" + dir + "]", e);
+        }
     }
-  }
 
 }
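
makeHar above is essentially a programmatic "hadoop archive" run: it builds the same -archiveName/-p argument vector and hands it to HadoopArchives through ToolRunner. The fragment below shows that call in isolation with the same API; the partition path and archive name are made-up placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.HadoopArchives;
import org.apache.hadoop.util.ToolRunner;

public class HarSketch {
    public static void archive() throws Exception {
        Configuration conf = new Configuration();
        // Placeholder paths: archive everything under the partition directory
        // into dt=2012-09-10.har created next to it.
        String[] args = {
            "-archiveName", "dt=2012-09-10.har",
            "-p", "/user/hive/warehouse/demo_table/dt=2012-09-10",
            "*",
            "/user/hive/warehouse/demo_table"
        };
        int rc = ToolRunner.run(new HadoopArchives(conf), args);
        if (rc != 0) {
            throw new IOException("hadoop archive exited with code " + rc);
        }
    }
}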
diff --git a/src/java/org/apache/hcatalog/listener/NotificationListener.java b/src/java/org/apache/hcatalog/listener/NotificationListener.java
index 8529c5a..0660042 100644
--- a/src/java/org/apache/hcatalog/listener/NotificationListener.java
+++ b/src/java/org/apache/hcatalog/listener/NotificationListener.java
@@ -75,299 +75,299 @@
  */
 public class NotificationListener extends MetaStoreEventListener {
 
-  private static final Logger LOG = LoggerFactory.getLogger(NotificationListener.class);
-  protected Session session;
-  protected Connection conn;
+    private static final Logger LOG = LoggerFactory.getLogger(NotificationListener.class);
+    protected Session session;
+    protected Connection conn;
 
-  /**
-   * Create message bus connection and session in constructor.
-   */
-  public NotificationListener(final Configuration conf) {
+    /**
+     * Create message bus connection and session in constructor.
+     */
+    public NotificationListener(final Configuration conf) {
 
-    super(conf);
-    createConnection();
-  }
-
-  private static String getTopicName(Partition partition,
-      ListenerEvent partitionEvent) throws MetaException {
-    try {
-      return partitionEvent.getHandler()
-          .get_table(partition.getDbName(), partition.getTableName())
-          .getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME);
-    } catch (NoSuchObjectException e) {
-      throw new MetaException(e.toString());
-    }
-  }
-
-  @Override
-  public void onAddPartition(AddPartitionEvent partitionEvent)
-      throws MetaException {
-    // Subscriber can get notification of newly add partition in a
-    // particular table by listening on a topic named "dbName.tableName"
-    // and message selector string as "HCAT_EVENT = HCAT_ADD_PARTITION"
-    if (partitionEvent.getStatus()) {
-
-      Partition partition = partitionEvent.getPartition();
-      String topicName = getTopicName(partition, partitionEvent);
-      if (topicName != null && !topicName.equals("")) {
-        send(partition, topicName, HCatConstants.HCAT_ADD_PARTITION_EVENT);
-      } else {
-        LOG.info("Topic name not found in metastore. Suppressing HCatalog notification for "
-            + partition.getDbName()
-            + "."
-            + partition.getTableName()
-            + " To enable notifications for this table, please do alter table set properties ("
-            + HCatConstants.HCAT_MSGBUS_TOPIC_NAME
-            + "=<dbname>.<tablename>) or whatever you want topic name to be.");
-      }
-    }
-
-  }
-
-  @Override
-  public void onDropPartition(DropPartitionEvent partitionEvent)
-      throws MetaException {
-    // Subscriber can get notification of dropped partition in a
-    // particular table by listening on a topic named "dbName.tableName"
-    // and message selector string as "HCAT_EVENT = HCAT_DROP_PARTITION"
-
-    // Datanucleus throws NPE when we try to serialize a partition object
-    // retrieved from metastore. To workaround that we reset following objects
-
-    if (partitionEvent.getStatus()) {
-      Partition partition = partitionEvent.getPartition();
-      StorageDescriptor sd = partition.getSd();
-      sd.setBucketCols(new ArrayList<String>());
-      sd.setSortCols(new ArrayList<Order>());
-      sd.setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      String topicName = getTopicName(partition, partitionEvent);
-      if (topicName != null && !topicName.equals("")) {
-        send(partition, topicName, HCatConstants.HCAT_DROP_PARTITION_EVENT);
-      } else {
-        LOG.info("Topic name not found in metastore. Suppressing HCatalog notification for "
-            + partition.getDbName()
-            + "."
-            + partition.getTableName()
-            + " To enable notifications for this table, please do alter table set properties ("
-            + HCatConstants.HCAT_MSGBUS_TOPIC_NAME
-            + "=<dbname>.<tablename>) or whatever you want topic name to be.");
-      }
-    }
-  }
-
-  @Override
-  public void onCreateDatabase(CreateDatabaseEvent dbEvent)
-      throws MetaException {
-    // Subscriber can get notification about addition of a database in HCAT
-    // by listening on a topic named "HCAT" and message selector string
-    // as "HCAT_EVENT = HCAT_ADD_DATABASE"
-    if (dbEvent.getStatus())
-      send(dbEvent.getDatabase(), getTopicPrefix(dbEvent.getHandler()
-          .getHiveConf()), HCatConstants.HCAT_ADD_DATABASE_EVENT);
-  }
-
-  @Override
-  public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
-    // Subscriber can get notification about drop of a database in HCAT
-    // by listening on a topic named "HCAT" and message selector string
-    // as "HCAT_EVENT = HCAT_DROP_DATABASE"
-    if (dbEvent.getStatus())
-      send(dbEvent.getDatabase(), getTopicPrefix(dbEvent.getHandler()
-          .getHiveConf()), HCatConstants.HCAT_DROP_DATABASE_EVENT);
-  }
-
-  @Override
-  public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
-    // Subscriber can get notification about addition of a table in HCAT
-    // by listening on a topic named "HCAT" and message selector string
-    // as "HCAT_EVENT = HCAT_ADD_TABLE"
-    if (tableEvent.getStatus()) {
-      Table tbl = tableEvent.getTable();
-      HMSHandler handler = tableEvent.getHandler();
-      HiveConf conf = handler.getHiveConf();
-      Table newTbl;
-      try {
-        newTbl = handler.get_table(tbl.getDbName(), tbl.getTableName())
-            .deepCopy();
-        newTbl.getParameters().put(
-            HCatConstants.HCAT_MSGBUS_TOPIC_NAME,
-            getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase() + "."
-                + newTbl.getTableName().toLowerCase());
-        handler.alter_table(newTbl.getDbName(), newTbl.getTableName(), newTbl);
-      } catch (InvalidOperationException e) {
-        MetaException me = new MetaException(e.toString());
-        me.initCause(e);
-        throw me;
-      } catch (NoSuchObjectException e) {
-        MetaException me = new MetaException(e.toString());
-        me.initCause(e);
-        throw me;
-      }
-      send(newTbl, getTopicPrefix(conf) + "."
-          + newTbl.getDbName().toLowerCase(),
-          HCatConstants.HCAT_ADD_TABLE_EVENT);
-    }
-  }
-
-  private String getTopicPrefix(HiveConf conf) {
-    return conf.get(HCatConstants.HCAT_MSGBUS_TOPIC_PREFIX,
-        HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX);
-  }
-
-  @Override
-  public void onDropTable(DropTableEvent tableEvent) throws MetaException {
-    // Subscriber can get notification about drop of a table in HCAT
-    // by listening on a topic named "HCAT" and message selector string
-    // as "HCAT_EVENT = HCAT_DROP_TABLE"
-
-    // Datanucleus throws NPE when we try to serialize a table object
-    // retrieved from metastore. To workaround that we reset following objects
-
-    if (tableEvent.getStatus()) {
-      Table table = tableEvent.getTable();
-      StorageDescriptor sd = table.getSd();
-      sd.setBucketCols(new ArrayList<String>());
-      sd.setSortCols(new ArrayList<Order>());
-      sd.setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      send(table, getTopicPrefix(tableEvent.getHandler().getHiveConf()) + "."
-          + table.getDbName().toLowerCase(),
-          HCatConstants.HCAT_DROP_TABLE_EVENT);
-    }
-  }
-
-  /**
-   * @param msgBody
-   *          is the metastore object. It is sent in full such that if
-   *          subscriber is really interested in details, it can reconstruct it
-   *          fully. In case of finalize_partition message this will be string
-   *          specification of the partition.
-   * @param topicName
-   *          is the name on message broker on which message is sent.
-   * @param event
-   *          is the value of HCAT_EVENT property in message. It can be used to
-   *          select messages in client side.
-   */
-  protected void send(Object msgBody, String topicName, String event) {
-
-    try {
-
-      Destination topic = null;
-      if (null == session) {
-        // this will happen, if we never able to establish a connection.
+        super(conf);
         createConnection();
-        if (null == session) {
-          // Still not successful, return from here.
-          LOG.error("Invalid session. Failed to send message on topic: "
-              + topicName + " event: " + event);
-          return;
-        }
-      }
-      try {
-        // Topics are created on demand. If it doesn't exist on broker it will
-        // be created when broker receives this message.
-        topic = session.createTopic(topicName);
-      } catch (IllegalStateException ise) {
-        // this will happen if we were able to establish connection once, but
-        // its no longer valid,
-        // ise is thrown, catch it and retry.
-        LOG.error("Seems like connection is lost. Retrying", ise);
-        createConnection();
-        topic = session.createTopic(topicName);
-      }
-      if (null == topic) {
-        // Still not successful, return from here.
-        LOG.error("Invalid session. Failed to send message on topic: "
-            + topicName + " event: " + event);
-        return;
-      }
-      MessageProducer producer = session.createProducer(topic);
-      Message msg;
-      if (msgBody instanceof Map) {
-        MapMessage mapMsg = session.createMapMessage();
-        Map<String, String> incomingMap = (Map<String, String>) msgBody;
-        for (Entry<String, String> partCol : incomingMap.entrySet()) {
-          mapMsg.setString(partCol.getKey(), partCol.getValue());
-        }
-        msg = mapMsg;
-      } else {
-        msg = session.createObjectMessage((Serializable) msgBody);
-      }
-
-      msg.setStringProperty(HCatConstants.HCAT_EVENT, event);
-      producer.send(msg);
-      // Message must be transacted before we return.
-      session.commit();
-    } catch (Exception e) {
-      // Gobble up the exception. Message delivery is best effort.
-      LOG.error("Failed to send message on topic: " + topicName + " event: "
-          + event, e);
     }
-  }
 
-  protected void createConnection() {
-
-    Context jndiCntxt;
-    try {
-      jndiCntxt = new InitialContext();
-      ConnectionFactory connFac = (ConnectionFactory) jndiCntxt
-          .lookup("ConnectionFactory");
-      Connection conn = connFac.createConnection();
-      conn.start();
-      conn.setExceptionListener(new ExceptionListener() {
-        @Override
-        public void onException(JMSException jmse) {
-          LOG.error(jmse.toString());
+    private static String getTopicName(Partition partition,
+                                       ListenerEvent partitionEvent) throws MetaException {
+        try {
+            return partitionEvent.getHandler()
+                .get_table(partition.getDbName(), partition.getTableName())
+                .getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME);
+        } catch (NoSuchObjectException e) {
+            throw new MetaException(e.toString());
         }
-      });
-      // We want message to be sent when session commits, thus we run in
-      // transacted mode.
-      session = conn.createSession(true, Session.SESSION_TRANSACTED);
-    } catch (NamingException e) {
-      LOG.error("JNDI error while setting up Message Bus connection. "
-          + "Please make sure file named 'jndi.properties' is in "
-          + "classpath and contains appropriate key-value pairs.", e);
-    } catch (JMSException e) {
-      LOG.error("Failed to initialize connection to message bus", e);
-    } catch (Throwable t) {
-      LOG.error("Unable to connect to JMS provider", t);
     }
-  }
 
-  @Override
-  protected void finalize() throws Throwable {
-    // Close the connection before dying.
-    try {
-      if (null != session)
-        session.close();
-      if (conn != null) {
-        conn.close();
-      }
+    @Override
+    public void onAddPartition(AddPartitionEvent partitionEvent)
+        throws MetaException {
+        // A subscriber can get notification of a newly added partition in a
+        // particular table by listening on a topic named "dbName.tableName"
+        // and message selector string as "HCAT_EVENT = HCAT_ADD_PARTITION"
+        if (partitionEvent.getStatus()) {
 
-    } catch (Exception ignore) {
-      LOG.info("Failed to close message bus connection.", ignore);
+            Partition partition = partitionEvent.getPartition();
+            String topicName = getTopicName(partition, partitionEvent);
+            if (topicName != null && !topicName.equals("")) {
+                send(partition, topicName, HCatConstants.HCAT_ADD_PARTITION_EVENT);
+            } else {
+                LOG.info("Topic name not found in metastore. Suppressing HCatalog notification for "
+                    + partition.getDbName()
+                    + "."
+                    + partition.getTableName()
+                    + " To enable notifications for this table, please do alter table set properties ("
+                    + HCatConstants.HCAT_MSGBUS_TOPIC_NAME
+                    + "=<dbname>.<tablename>) or whatever you want topic name to be.");
+            }
+        }
+
     }
-  }
 
-  @Override
-  public void onLoadPartitionDone(LoadPartitionDoneEvent lpde)
-      throws MetaException {
-    if (lpde.getStatus())
-      send(
-          lpde.getPartitionName(),
-          lpde.getTable().getParameters()
-              .get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME),
-          HCatConstants.HCAT_PARTITION_DONE_EVENT);
-  }
+    @Override
+    public void onDropPartition(DropPartitionEvent partitionEvent)
+        throws MetaException {
+        // Subscriber can get notification of dropped partition in a
+        // particular table by listening on a topic named "dbName.tableName"
+        // and message selector string as "HCAT_EVENT = HCAT_DROP_PARTITION"
 
-  @Override
-  public void onAlterPartition(AlterPartitionEvent ape) throws MetaException {
-    // no-op
-  }
+        // Datanucleus throws an NPE when we try to serialize a partition object
+        // retrieved from the metastore. To work around that we reset the following objects.
 
-  @Override
-  public void onAlterTable(AlterTableEvent ate) throws MetaException {
-    // no-op
-  }
+        if (partitionEvent.getStatus()) {
+            Partition partition = partitionEvent.getPartition();
+            StorageDescriptor sd = partition.getSd();
+            sd.setBucketCols(new ArrayList<String>());
+            sd.setSortCols(new ArrayList<Order>());
+            sd.setParameters(new HashMap<String, String>());
+            sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+            String topicName = getTopicName(partition, partitionEvent);
+            if (topicName != null && !topicName.equals("")) {
+                send(partition, topicName, HCatConstants.HCAT_DROP_PARTITION_EVENT);
+            } else {
+                LOG.info("Topic name not found in metastore. Suppressing HCatalog notification for "
+                    + partition.getDbName()
+                    + "."
+                    + partition.getTableName()
+                    + " To enable notifications for this table, please do alter table set properties ("
+                    + HCatConstants.HCAT_MSGBUS_TOPIC_NAME
+                    + "=<dbname>.<tablename>) or whatever you want topic name to be.");
+            }
+        }
+    }
+
+    @Override
+    public void onCreateDatabase(CreateDatabaseEvent dbEvent)
+        throws MetaException {
+        // Subscriber can get notification about addition of a database in HCAT
+        // by listening on a topic named "HCAT" and message selector string
+        // as "HCAT_EVENT = HCAT_ADD_DATABASE"
+        if (dbEvent.getStatus())
+            send(dbEvent.getDatabase(), getTopicPrefix(dbEvent.getHandler()
+                .getHiveConf()), HCatConstants.HCAT_ADD_DATABASE_EVENT);
+    }
+
+    @Override
+    public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
+        // Subscriber can get notification about drop of a database in HCAT
+        // by listening on a topic named "HCAT" and message selector string
+        // as "HCAT_EVENT = HCAT_DROP_DATABASE"
+        if (dbEvent.getStatus())
+            send(dbEvent.getDatabase(), getTopicPrefix(dbEvent.getHandler()
+                .getHiveConf()), HCatConstants.HCAT_DROP_DATABASE_EVENT);
+    }
+
+    @Override
+    public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
+        // Subscriber can get notification about addition of a table in HCAT
+        // by listening on a topic named "HCAT" and message selector string
+        // as "HCAT_EVENT = HCAT_ADD_TABLE"
+        if (tableEvent.getStatus()) {
+            Table tbl = tableEvent.getTable();
+            HMSHandler handler = tableEvent.getHandler();
+            HiveConf conf = handler.getHiveConf();
+            Table newTbl;
+            try {
+                newTbl = handler.get_table(tbl.getDbName(), tbl.getTableName())
+                    .deepCopy();
+                newTbl.getParameters().put(
+                    HCatConstants.HCAT_MSGBUS_TOPIC_NAME,
+                    getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase() + "."
+                        + newTbl.getTableName().toLowerCase());
+                handler.alter_table(newTbl.getDbName(), newTbl.getTableName(), newTbl);
+            } catch (InvalidOperationException e) {
+                MetaException me = new MetaException(e.toString());
+                me.initCause(e);
+                throw me;
+            } catch (NoSuchObjectException e) {
+                MetaException me = new MetaException(e.toString());
+                me.initCause(e);
+                throw me;
+            }
+            send(newTbl, getTopicPrefix(conf) + "."
+                + newTbl.getDbName().toLowerCase(),
+                HCatConstants.HCAT_ADD_TABLE_EVENT);
+        }
+    }
+
+    private String getTopicPrefix(HiveConf conf) {
+        return conf.get(HCatConstants.HCAT_MSGBUS_TOPIC_PREFIX,
+            HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX);
+    }
+
+    @Override
+    public void onDropTable(DropTableEvent tableEvent) throws MetaException {
+        // Subscriber can get notification about drop of a table in HCAT
+        // by listening on a topic named "HCAT" and message selector string
+        // as "HCAT_EVENT = HCAT_DROP_TABLE"
+
+        // Datanucleus throws an NPE when we try to serialize a table object
+        // retrieved from the metastore. To work around that we reset the following objects.
+
+        if (tableEvent.getStatus()) {
+            Table table = tableEvent.getTable();
+            StorageDescriptor sd = table.getSd();
+            sd.setBucketCols(new ArrayList<String>());
+            sd.setSortCols(new ArrayList<Order>());
+            sd.setParameters(new HashMap<String, String>());
+            sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+            send(table, getTopicPrefix(tableEvent.getHandler().getHiveConf()) + "."
+                + table.getDbName().toLowerCase(),
+                HCatConstants.HCAT_DROP_TABLE_EVENT);
+        }
+    }
+
+    /**
+     * @param msgBody
+     *          is the metastore object. It is sent in full so that a
+     *          subscriber interested in the details can reconstruct it
+     *          fully. In the case of a finalize_partition message this will be
+     *          the string specification of the partition.
+     * @param topicName
+     *          is the name of the topic on the message broker on which the
+     *          message is sent.
+     * @param event
+     *          is the value of the HCAT_EVENT property in the message. It can be
+     *          used to select messages on the client side.
+     */
+    protected void send(Object msgBody, String topicName, String event) {
+
+        try {
+
+            Destination topic = null;
+            if (null == session) {
+                // this will happen if we were never able to establish a connection.
+                createConnection();
+                if (null == session) {
+                    // Still not successful, return from here.
+                    LOG.error("Invalid session. Failed to send message on topic: "
+                        + topicName + " event: " + event);
+                    return;
+                }
+            }
+            try {
+                // Topics are created on demand. If the topic doesn't exist on the broker,
+                // it will be created when the broker receives this message.
+                topic = session.createTopic(topicName);
+            } catch (IllegalStateException ise) {
+                // this will happen if we were able to establish a connection once but
+                // it is no longer valid; an IllegalStateException is thrown, so catch
+                // it and retry.
+                LOG.error("Seems like connection is lost. Retrying", ise);
+                createConnection();
+                topic = session.createTopic(topicName);
+            }
+            if (null == topic) {
+                // Still not successful, return from here.
+                LOG.error("Invalid session. Failed to send message on topic: "
+                    + topicName + " event: " + event);
+                return;
+            }
+            MessageProducer producer = session.createProducer(topic);
+            Message msg;
+            if (msgBody instanceof Map) {
+                MapMessage mapMsg = session.createMapMessage();
+                Map<String, String> incomingMap = (Map<String, String>) msgBody;
+                for (Entry<String, String> partCol : incomingMap.entrySet()) {
+                    mapMsg.setString(partCol.getKey(), partCol.getValue());
+                }
+                msg = mapMsg;
+            } else {
+                msg = session.createObjectMessage((Serializable) msgBody);
+            }
+
+            msg.setStringProperty(HCatConstants.HCAT_EVENT, event);
+            producer.send(msg);
+            // Message must be transacted before we return.
+            session.commit();
+        } catch (Exception e) {
+            // Gobble up the exception. Message delivery is best effort.
+            LOG.error("Failed to send message on topic: " + topicName + " event: "
+                + event, e);
+        }
+    }
+
+    protected void createConnection() {
+
+        Context jndiCntxt;
+        try {
+            jndiCntxt = new InitialContext();
+            ConnectionFactory connFac = (ConnectionFactory) jndiCntxt
+                .lookup("ConnectionFactory");
+            Connection conn = connFac.createConnection();
+            conn.start();
+            conn.setExceptionListener(new ExceptionListener() {
+                @Override
+                public void onException(JMSException jmse) {
+                    LOG.error(jmse.toString());
+                }
+            });
+            // We want the message to be sent only when the session commits, so we
+            // run in transacted mode.
+            session = conn.createSession(true, Session.SESSION_TRANSACTED);
+        } catch (NamingException e) {
+            LOG.error("JNDI error while setting up Message Bus connection. "
+                + "Please make sure file named 'jndi.properties' is in "
+                + "classpath and contains appropriate key-value pairs.", e);
+        } catch (JMSException e) {
+            LOG.error("Failed to initialize connection to message bus", e);
+        } catch (Throwable t) {
+            LOG.error("Unable to connect to JMS provider", t);
+        }
+    }
+
+    @Override
+    protected void finalize() throws Throwable {
+        // Close the connection before dying.
+        try {
+            if (null != session)
+                session.close();
+            if (conn != null) {
+                conn.close();
+            }
+
+        } catch (Exception ignore) {
+            LOG.info("Failed to close message bus connection.", ignore);
+        }
+    }
+
+    @Override
+    public void onLoadPartitionDone(LoadPartitionDoneEvent lpde)
+        throws MetaException {
+        if (lpde.getStatus())
+            send(
+                lpde.getPartitionName(),
+                lpde.getTable().getParameters()
+                    .get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME),
+                HCatConstants.HCAT_PARTITION_DONE_EVENT);
+    }
+
+    @Override
+    public void onAlterPartition(AlterPartitionEvent ape) throws MetaException {
+        // no-op
+    }
+
+    @Override
+    public void onAlterTable(AlterTableEvent ate) throws MetaException {
+        // no-op
+    }
 }
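
On the consuming side, the notifications sent above are ordinary JMS messages whose HCAT_EVENT string property carries the event type, so a subscriber filters with a standard message selector. The sketch below uses only the generic javax.jms/JNDI API; the topic name "mydb.mytable" is a placeholder, the "ConnectionFactory" lookup mirrors the listener's own jndi.properties assumption, and HCatConstants is assumed to be on the classpath.

import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.Message;
import javax.jms.MessageConsumer;
import javax.jms.Session;
import javax.jms.Topic;
import javax.naming.InitialContext;

import org.apache.hcatalog.common.HCatConstants;

public class AddPartitionSubscriberSketch {
    public static void main(String[] args) throws Exception {
        InitialContext jndi = new InitialContext();   // reads jndi.properties from the classpath
        ConnectionFactory factory = (ConnectionFactory) jndi.lookup("ConnectionFactory");
        Connection connection = factory.createConnection();
        connection.start();
        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Topic topic = session.createTopic("mydb.mytable");   // placeholder db.table topic
        // Only receive add-partition events, matching the selector the listener documents.
        String selector = HCatConstants.HCAT_EVENT + " = '"
            + HCatConstants.HCAT_ADD_PARTITION_EVENT + "'";
        MessageConsumer consumer = session.createConsumer(topic, selector);
        Message msg = consumer.receive();                     // blocks until a notification arrives
        System.out.println("Got event: " + msg.getStringProperty(HCatConstants.HCAT_EVENT));
        connection.close();
    }
}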
diff --git a/src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java b/src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
index cc0ae9e..3b1df91 100644
--- a/src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
+++ b/src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
@@ -45,7 +45,7 @@
      * @throws IOException
      */
     public DefaultOutputCommitterContainer(JobContext context, org.apache.hadoop.mapred.OutputCommitter baseCommitter) throws IOException {
-        super(context,baseCommitter);
+        super(context, baseCommitter);
     }
 
     @Override
@@ -95,8 +95,8 @@
             HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
             client = HCatUtil.getHiveClient(hiveConf);
             String tokenStrForm = client.getTokenStrForm();
-            if(tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
-              client.cancelDelegationToken(tokenStrForm);
+            if (tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
+                client.cancelDelegationToken(tokenStrForm);
             }
         } catch (Exception e) {
             LOG.warn("Failed to cancel delegation token", e);
diff --git a/src/java/org/apache/hcatalog/mapreduce/DefaultOutputFormatContainer.java b/src/java/org/apache/hcatalog/mapreduce/DefaultOutputFormatContainer.java
index e05fd01..a87e7bb 100644
--- a/src/java/org/apache/hcatalog/mapreduce/DefaultOutputFormatContainer.java
+++ b/src/java/org/apache/hcatalog/mapreduce/DefaultOutputFormatContainer.java
@@ -42,8 +42,8 @@
     private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
 
     static {
-      NUMBER_FORMAT.setMinimumIntegerDigits(5);
-      NUMBER_FORMAT.setGroupingUsed(false);
+        NUMBER_FORMAT.setMinimumIntegerDigits(5);
+        NUMBER_FORMAT.setGroupingUsed(false);
     }
 
     public DefaultOutputFormatContainer(org.apache.hadoop.mapred.OutputFormat<WritableComparable<?>, Writable> of) {
@@ -52,7 +52,7 @@
 
     static synchronized String getOutputName(int partition) {
         return "part-" + NUMBER_FORMAT.format(partition);
-      }
+    }
 
     /**
      * Get the record writer for the job. Uses the storagehandler's OutputFormat
@@ -66,7 +66,7 @@
     getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
         String name = getOutputName(context.getTaskAttemptID().getTaskID().getId());
         return new DefaultRecordWriterContainer(context,
-                getBaseOutputFormat().getRecordWriter(null, new JobConf(context.getConfiguration()), name, InternalUtil.createReporter(context)));
+            getBaseOutputFormat().getRecordWriter(null, new JobConf(context.getConfiguration()), name, InternalUtil.createReporter(context)));
     }
 
 
@@ -80,7 +80,7 @@
      */
     @Override
     public OutputCommitter getOutputCommitter(TaskAttemptContext context)
-            throws IOException, InterruptedException {
+        throws IOException, InterruptedException {
         return new DefaultOutputCommitterContainer(context, new JobConf(context.getConfiguration()).getOutputCommitter());
     }
 
@@ -93,7 +93,7 @@
     public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
         org.apache.hadoop.mapred.OutputFormat<? super WritableComparable<?>, ? super Writable> outputFormat = getBaseOutputFormat();
         JobConf jobConf = new JobConf(context.getConfiguration());
-        outputFormat.checkOutputSpecs(null,jobConf);
+        outputFormat.checkOutputSpecs(null, jobConf);
         HCatUtil.copyConf(jobConf, context.getConfiguration());
     }
 
diff --git a/src/java/org/apache/hcatalog/mapreduce/DefaultRecordWriterContainer.java b/src/java/org/apache/hcatalog/mapreduce/DefaultRecordWriterContainer.java
index 884791d..49cf05e 100644
--- a/src/java/org/apache/hcatalog/mapreduce/DefaultRecordWriterContainer.java
+++ b/src/java/org/apache/hcatalog/mapreduce/DefaultRecordWriterContainer.java
@@ -49,32 +49,32 @@
      */
     public DefaultRecordWriterContainer(TaskAttemptContext context,
                                         org.apache.hadoop.mapred.RecordWriter<? super WritableComparable<?>, ? super Writable> baseRecordWriter) throws IOException, InterruptedException {
-        super(context,baseRecordWriter);
+        super(context, baseRecordWriter);
         jobInfo = HCatOutputFormat.getJobInfo(context);
         storageHandler = HCatUtil.getStorageHandler(context.getConfiguration(), jobInfo.getTableInfo().getStorerInfo());
         HCatOutputFormat.configureOutputStorageHandler(context);
-        serDe = ReflectionUtils.newInstance(storageHandler.getSerDeClass(),context.getConfiguration());
+        serDe = ReflectionUtils.newInstance(storageHandler.getSerDeClass(), context.getConfiguration());
         hcatRecordOI = InternalUtil.createStructObjectInspector(jobInfo.getOutputSchema());
         try {
             InternalUtil.initializeOutputSerDe(serDe, context.getConfiguration(), jobInfo);
         } catch (SerDeException e) {
-            throw new IOException("Failed to initialize SerDe",e);
+            throw new IOException("Failed to initialize SerDe", e);
         }
     }
 
     @Override
     public void close(TaskAttemptContext context) throws IOException,
-            InterruptedException {
+        InterruptedException {
         getBaseRecordWriter().close(InternalUtil.createReporter(context));
     }
 
     @Override
     public void write(WritableComparable<?> key, HCatRecord value) throws IOException,
-            InterruptedException {
+        InterruptedException {
         try {
             getBaseRecordWriter().write(null, serDe.serialize(value.getAll(), hcatRecordOI));
         } catch (SerDeException e) {
-            throw new IOException("Failed to serialize object",e);
+            throw new IOException("Failed to serialize object", e);
         }
     }
 
diff --git a/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java b/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
index e593842..8e692dc 100644
--- a/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ b/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -87,34 +87,34 @@
      * @throws IOException
      */
     public FileOutputCommitterContainer(JobContext context,
-                                                          org.apache.hadoop.mapred.OutputCommitter baseCommitter) throws IOException {
+                                        org.apache.hadoop.mapred.OutputCommitter baseCommitter) throws IOException {
         super(context, baseCommitter);
         jobInfo = HCatOutputFormat.getJobInfo(context);
         dynamicPartitioningUsed = jobInfo.isDynamicPartitioningUsed();
 
         this.partitionsDiscovered = !dynamicPartitioningUsed;
-        cachedStorageHandler = HCatUtil.getStorageHandler(context.getConfiguration(),jobInfo.getTableInfo().getStorerInfo());
+        cachedStorageHandler = HCatUtil.getStorageHandler(context.getConfiguration(), jobInfo.getTableInfo().getStorerInfo());
     }
 
     @Override
     public void abortTask(TaskAttemptContext context) throws IOException {
-        if (!dynamicPartitioningUsed){
+        if (!dynamicPartitioningUsed) {
             getBaseOutputCommitter().abortTask(HCatMapRedUtil.createTaskAttemptContext(context));
         }
     }
 
     @Override
     public void commitTask(TaskAttemptContext context) throws IOException {
-        if (!dynamicPartitioningUsed){
+        if (!dynamicPartitioningUsed) {
             getBaseOutputCommitter().commitTask(HCatMapRedUtil.createTaskAttemptContext(context));
         }
     }
 
     @Override
     public boolean needsTaskCommit(TaskAttemptContext context) throws IOException {
-        if (!dynamicPartitioningUsed){
+        if (!dynamicPartitioningUsed) {
             return getBaseOutputCommitter().needsTaskCommit(HCatMapRedUtil.createTaskAttemptContext(context));
-        }else{
+        } else {
             // called explicitly through FileRecordWriterContainer.close() if dynamic - return false by default
             return false;
         }
@@ -122,7 +122,7 @@
 
     @Override
     public void setupJob(JobContext context) throws IOException {
-        if(getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
+        if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
             getBaseOutputCommitter().setupJob(HCatMapRedUtil.createJobContext(context));
         }
         // in dynamic usecase, called through FileRecordWriterContainer
@@ -130,7 +130,7 @@
 
     @Override
     public void setupTask(TaskAttemptContext context) throws IOException {
-        if (!dynamicPartitioningUsed){
+        if (!dynamicPartitioningUsed) {
             getBaseOutputCommitter().setupTask(HCatMapRedUtil.createTaskAttemptContext(context));
         }
     }
@@ -138,16 +138,15 @@
     @Override
     public void abortJob(JobContext jobContext, State state) throws IOException {
         org.apache.hadoop.mapred.JobContext
-                mapRedJobContext = HCatMapRedUtil.createJobContext(jobContext);
-        if (dynamicPartitioningUsed){
+            mapRedJobContext = HCatMapRedUtil.createJobContext(jobContext);
+        if (dynamicPartitioningUsed) {
             discoverPartitions(jobContext);
         }
 
-        if(getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
+        if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
             getBaseOutputCommitter().abortJob(mapRedJobContext, state);
-        }
-        else if (dynamicPartitioningUsed){
-            for(JobContext currContext : contextDiscoveredByPath.values()){
+        } else if (dynamicPartitioningUsed) {
+            for (JobContext currContext : contextDiscoveredByPath.values()) {
                 try {
                     new JobConf(currContext.getConfiguration()).getOutputCommitter().abortJob(currContext, state);
                 } catch (Exception e) {
@@ -166,12 +165,12 @@
             // In the latter case the HCAT_KEY_TOKEN_SIGNATURE property in
             // the conf will not be set
             String tokenStrForm = client.getTokenStrForm();
-            if(tokenStrForm != null && jobContext.getConfiguration().get
-                    (HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
+            if (tokenStrForm != null && jobContext.getConfiguration().get
+                (HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
                 client.cancelDelegationToken(tokenStrForm);
             }
-        } catch(Exception e) {
-            if( e instanceof HCatException ) {
+        } catch (Exception e) {
+            if (e instanceof HCatException) {
                 throw (HCatException) e;
             } else {
                 throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
@@ -182,10 +181,10 @@
 
         Path src;
         OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(jobContext);
-        if (dynamicPartitioningUsed){
+        if (dynamicPartitioningUsed) {
             src = new Path(getPartitionRootLocation(jobInfo.getLocation(),
                 jobInfo.getTableInfo().getTable().getPartitionKeysSize()));
-        }else{
+        } else {
             src = new Path(jobInfo.getLocation());
         }
         FileSystem fs = src.getFileSystem(jobContext.getConfiguration());
@@ -195,31 +194,31 @@
 
     public static final String SUCCEEDED_FILE_NAME = "_SUCCESS";
     static final String SUCCESSFUL_JOB_OUTPUT_DIR_MARKER =
-            "mapreduce.fileoutputcommitter.marksuccessfuljobs";
+        "mapreduce.fileoutputcommitter.marksuccessfuljobs";
 
     private static boolean getOutputDirMarking(Configuration conf) {
         return conf.getBoolean(SUCCESSFUL_JOB_OUTPUT_DIR_MARKER,
-                false);
+            false);
     }
 
     @Override
     public void commitJob(JobContext jobContext) throws IOException {
-        if (dynamicPartitioningUsed){
+        if (dynamicPartitioningUsed) {
             discoverPartitions(jobContext);
         }
-        if(getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
+        if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
             getBaseOutputCommitter().commitJob(HCatMapRedUtil.createJobContext(jobContext));
         }
         // create _SUCCESS FILE if so requested.
         OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(jobContext);
-        if(getOutputDirMarking(jobContext.getConfiguration())) {
+        if (getOutputDirMarking(jobContext.getConfiguration())) {
             Path outputPath = new Path(jobInfo.getLocation());
             if (outputPath != null) {
                 FileSystem fileSys = outputPath.getFileSystem(jobContext.getConfiguration());
                 // create a file in the folder to mark it
                 if (fileSys.exists(outputPath)) {
                     Path filePath = new Path(outputPath, SUCCEEDED_FILE_NAME);
-                    if(!fileSys.exists(filePath)) { // may have been created by baseCommitter.commitJob()
+                    if (!fileSys.exists(filePath)) { // may have been created by baseCommitter.commitJob()
                         fileSys.create(filePath).close();
                     }
                 }
@@ -231,7 +230,7 @@
     @Override
     public void cleanupJob(JobContext context) throws IOException {
 
-        if (dynamicPartitioningUsed){
+        if (dynamicPartitioningUsed) {
             discoverPartitions(context);
         }
 
@@ -242,13 +241,12 @@
         Path tblPath = new Path(table.getTTable().getSd().getLocation());
         FileSystem fs = tblPath.getFileSystem(conf);
 
-        if( table.getPartitionKeys().size() == 0 ) {
+        if (table.getPartitionKeys().size() == 0) {
             //non partitioned table
-            if(getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
-               getBaseOutputCommitter().cleanupJob(HCatMapRedUtil.createJobContext(context));
-            }
-            else if (dynamicPartitioningUsed){
-                for(JobContext currContext : contextDiscoveredByPath.values()){
+            if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
+                getBaseOutputCommitter().cleanupJob(HCatMapRedUtil.createJobContext(context));
+            } else if (dynamicPartitioningUsed) {
+                for (JobContext currContext : contextDiscoveredByPath.values()) {
                     try {
                         JobConf jobConf = new JobConf(currContext.getConfiguration());
                         jobConf.getOutputCommitter().cleanupJob(currContext);
@@ -285,35 +283,35 @@
             FsPermission perms = tblStat.getPermission();
 
             List<Partition> partitionsToAdd = new ArrayList<Partition>();
-            if (!dynamicPartitioningUsed){
+            if (!dynamicPartitioningUsed) {
                 partitionsToAdd.add(
-                        constructPartition(
-                                context,
-                                tblPath.toString(), jobInfo.getPartitionValues()
-                                ,jobInfo.getOutputSchema(), getStorerParameterMap(storer)
-                                ,table, fs
-                                ,grpName,perms));
-            }else{
-                for (Entry<String,Map<String,String>> entry : partitionsDiscoveredByPath.entrySet()){
+                    constructPartition(
+                        context,
+                        tblPath.toString(), jobInfo.getPartitionValues()
+                        , jobInfo.getOutputSchema(), getStorerParameterMap(storer)
+                        , table, fs
+                        , grpName, perms));
+            } else {
+                for (Entry<String, Map<String, String>> entry : partitionsDiscoveredByPath.entrySet()) {
                     partitionsToAdd.add(
-                            constructPartition(
-                                    context,
-                                    getPartitionRootLocation(entry.getKey(),entry.getValue().size()), entry.getValue()
-                                    ,jobInfo.getOutputSchema(), getStorerParameterMap(storer)
-                                    ,table, fs
-                                    ,grpName,perms));
+                        constructPartition(
+                            context,
+                            getPartitionRootLocation(entry.getKey(), entry.getValue().size()), entry.getValue()
+                            , jobInfo.getOutputSchema(), getStorerParameterMap(storer)
+                            , table, fs
+                            , grpName, perms));
                 }
             }
 
             //Publish the new partition(s)
-            if (dynamicPartitioningUsed && harProcessor.isEnabled() && (!partitionsToAdd.isEmpty())){
+            if (dynamicPartitioningUsed && harProcessor.isEnabled() && (!partitionsToAdd.isEmpty())) {
 
                 Path src = new Path(ptnRootLocation);
 
                 // check here for each dir we're copying out, to see if it already exists, error out if so
-                moveTaskOutputs(fs, src, src, tblPath,true);
+                moveTaskOutputs(fs, src, src, tblPath, true);
 
-                moveTaskOutputs(fs, src, src, tblPath,false);
+                moveTaskOutputs(fs, src, src, tblPath, false);
                 fs.delete(src, true);
 
 
@@ -326,18 +324,18 @@
                 try {
                     client.add_partitions(partitionsToAdd);
                     partitionsAdded = partitionsToAdd;
-                } catch (Exception e){
+                } catch (Exception e) {
                     // There was an error adding partitions : rollback fs copy and rethrow
-                    for (Partition p : partitionsToAdd){
+                    for (Partition p : partitionsToAdd) {
                         Path ptnPath = new Path(harProcessor.getParentFSPath(new Path(p.getSd().getLocation())));
-                        if (fs.exists(ptnPath)){
-                            fs.delete(ptnPath,true);
+                        if (fs.exists(ptnPath)) {
+                            fs.delete(ptnPath, true);
                         }
                     }
                     throw e;
                 }
 
-            }else{
+            } else {
                 // no harProcessor, regular operation
 
                 // No duplicate partition publish case to worry about because we'll
@@ -346,37 +344,37 @@
                 client.add_partitions(partitionsToAdd);
                 partitionsAdded = partitionsToAdd;
 
-                if (dynamicPartitioningUsed && (partitionsAdded.size()>0)){
+                if (dynamicPartitioningUsed && (partitionsAdded.size() > 0)) {
                     Path src = new Path(ptnRootLocation);
-                    moveTaskOutputs(fs, src, src, tblPath,false);
+                    moveTaskOutputs(fs, src, src, tblPath, false);
                     fs.delete(src, true);
                 }
 
             }
 
-            if(getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
+            if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
                 getBaseOutputCommitter().cleanupJob(HCatMapRedUtil.createJobContext(context));
             }
 
-            if(Security.getInstance().isSecurityEnabled()) {
+            if (Security.getInstance().isSecurityEnabled()) {
                 Security.getInstance().cancelToken(client, context);
             }
         } catch (Exception e) {
 
-            if( partitionsAdded.size() > 0 ) {
+            if (partitionsAdded.size() > 0) {
                 try {
                     //baseCommitter.cleanupJob failed, try to clean up the metastore
-                    for (Partition p : partitionsAdded){
+                    for (Partition p : partitionsAdded) {
                         client.dropPartition(tableInfo.getDatabaseName(),
-                                tableInfo.getTableName(), p.getValues());
+                            tableInfo.getTableName(), p.getValues());
                     }
-                } catch(Exception te) {
+                } catch (Exception te) {
                     //Keep cause as the original exception
                     throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
                 }
             }
 
-            if( e instanceof HCatException ) {
+            if (e instanceof HCatException) {
                 throw (HCatException) e;
             } else {
                 throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
@@ -386,11 +384,11 @@
         }
     }
 
-    private String getPartitionRootLocation(String ptnLocn,int numPtnKeys) {
-        if (ptnRootLocation  == null){
+    private String getPartitionRootLocation(String ptnLocn, int numPtnKeys) {
+        if (ptnRootLocation == null) {
             // we only need to calculate it once, it'll be the same for other partitions in this job.
             Path ptnRoot = new Path(ptnLocn);
-            for (int i = 0; i < numPtnKeys; i++){
+            for (int i = 0; i < numPtnKeys; i++) {
 //          LOG.info("Getting parent of "+ptnRoot.getName());
                 ptnRoot = ptnRoot.getParent();
             }
@@ -416,11 +414,11 @@
      */
 
     private Partition constructPartition(
-            JobContext context,
-            String partLocnRoot, Map<String,String> partKVs,
-            HCatSchema outputSchema, Map<String, String> params,
-            Table table, FileSystem fs,
-            String grpName, FsPermission perms) throws IOException {
+        JobContext context,
+        String partLocnRoot, Map<String, String> partKVs,
+        HCatSchema outputSchema, Map<String, String> params,
+        Table table, FileSystem fs,
+        String grpName, FsPermission perms) throws IOException {
 
         Partition partition = new Partition();
         partition.setDbName(table.getDbName());
@@ -428,7 +426,7 @@
         partition.setSd(new StorageDescriptor(table.getTTable().getSd()));
 
         List<FieldSchema> fields = new ArrayList<FieldSchema>();
-        for(HCatFieldSchema fieldSchema : outputSchema.getFields()) {
+        for (HCatFieldSchema fieldSchema : outputSchema.getFields()) {
             fields.add(HCatSchemaUtils.getFieldSchema(fieldSchema));
         }
 
@@ -450,16 +448,16 @@
         }
         // Apply the group and permissions to the leaf partition and files.
         applyGroupAndPerms(fs, partPath, perms, grpName, true);
-        if (dynamicPartitioningUsed){
-            String dynamicPartitionDestination = getFinalDynamicPartitionDestination(table,partKVs);
-            if (harProcessor.isEnabled()){
+        if (dynamicPartitioningUsed) {
+            String dynamicPartitionDestination = getFinalDynamicPartitionDestination(table, partKVs);
+            if (harProcessor.isEnabled()) {
                 harProcessor.exec(context, partition, partPath);
                 partition.getSd().setLocation(
-                        harProcessor.getProcessedLocation(new Path(dynamicPartitionDestination)));
-            }else{
+                    harProcessor.getProcessedLocation(new Path(dynamicPartitionDestination)));
+            } else {
                 partition.getSd().setLocation(dynamicPartitionDestination);
             }
-        }else{
+        } else {
             partition.getSd().setLocation(partPath.toString());
         }
 
@@ -467,8 +465,8 @@
     }
 
     private void applyGroupAndPerms(FileSystem fs, Path dir, FsPermission permission,
-            String group, boolean recursive)
-            throws IOException {
+                                    String group, boolean recursive)
+        throws IOException {
         fs.setPermission(dir, permission);
         try {
             fs.setOwner(dir, null, group);
@@ -491,11 +489,11 @@
         }
     }
 
-    private String getFinalDynamicPartitionDestination(Table table, Map<String,String> partKVs) {
+    private String getFinalDynamicPartitionDestination(Table table, Map<String, String> partKVs) {
         // file:///tmp/hcat_junit_warehouse/employee/_DYN0.7770480401313761/emp_country=IN/emp_state=KA  ->
         // file:///tmp/hcat_junit_warehouse/employee/emp_country=IN/emp_state=KA
         Path partPath = new Path(table.getTTable().getSd().getLocation());
-        for(FieldSchema partKey : table.getPartitionKeys()){
+        for (FieldSchema partKey : table.getPartitionKeys()) {
             partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
         }
         return partPath.toString();
@@ -505,13 +503,13 @@
         Map<String, String> params = new HashMap<String, String>();
 
         //Copy table level hcat.* keys to the partition
-        for(Entry<Object, Object> entry : storer.getProperties().entrySet()) {
+        for (Entry<Object, Object> entry : storer.getProperties().entrySet()) {
             params.put(entry.getKey().toString(), entry.getValue().toString());
         }
         return params;
     }
 
-    private Path constructPartialPartPath(Path partialPath, String partKey, Map<String,String> partKVs){
+    private Path constructPartialPartPath(Path partialPath, String partKey, Map<String, String> partKVs) {
 
         StringBuilder sb = new StringBuilder(FileUtils.escapePathName(partKey));
         sb.append("=");
@@ -534,7 +532,7 @@
 
         List<FieldSchema> newColumns = HCatUtil.validatePartitionSchema(table, partitionSchema);
 
-        if( newColumns.size() != 0 ) {
+        if (newColumns.size() != 0) {
             List<FieldSchema> tableColumns = new ArrayList<FieldSchema>(table.getTTable().getSd().getCols());
             tableColumns.addAll(newColumns);
 
@@ -561,12 +559,12 @@
         if (fs.isFile(file)) {
             Path finalOutputPath = getFinalPath(file, src, dest);
 
-            if (dryRun){
+            if (dryRun) {
 //        LOG.info("Testing if moving ["+file+"] to ["+finalOutputPath+"] would cause a problem");
-                if (fs.exists(finalOutputPath)){
+                if (fs.exists(finalOutputPath)) {
                     throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Data already exists in " + finalOutputPath + ", duplicate publish possible.");
                 }
-            }else{
+            } else {
 //        LOG.info("Moving ["+file+"] to ["+finalOutputPath+"]");
                 if (!fs.rename(file, finalOutputPath)) {
                     if (!fs.delete(finalOutputPath, true)) {
@@ -577,15 +575,15 @@
                     }
                 }
             }
-        } else if(fs.getFileStatus(file).isDir()) {
+        } else if (fs.getFileStatus(file).isDir()) {
             FileStatus[] paths = fs.listStatus(file);
             Path finalOutputPath = getFinalPath(file, src, dest);
-            if (!dryRun){
+            if (!dryRun) {
                 fs.mkdirs(finalOutputPath);
             }
             if (paths != null) {
                 for (FileStatus path : paths) {
-                    moveTaskOutputs(fs, path.getPath(), src, dest,dryRun);
+                    moveTaskOutputs(fs, path.getPath(), src, dest, dryRun);
                 }
             }
         }
@@ -606,7 +604,7 @@
         URI relativePath = src.toUri().relativize(taskOutputUri);
         if (taskOutputUri == relativePath) {
             throw new HCatException(ErrorType.ERROR_MOVE_FAILED, "Can not get the relative path: base = " +
-                    src + " child = " + file);
+                src + " child = " + file);
         }
         if (relativePath.getPath().length() > 0) {
             return new Path(dest, relativePath.getPath());
@@ -619,7 +617,7 @@
      * Run to discover dynamic partitions available
      */
     private void discoverPartitions(JobContext context) throws IOException {
-        if (!partitionsDiscovered){
+        if (!partitionsDiscovered) {
             //      LOG.info("discover ptns called");
             OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context);
 
@@ -639,33 +637,33 @@
             Path pathPattern = new Path(dynPathSpec);
             FileStatus[] status = fs.globStatus(pathPattern);
 
-            partitionsDiscoveredByPath = new LinkedHashMap<String,Map<String, String>>();
-            contextDiscoveredByPath = new LinkedHashMap<String,JobContext>();
+            partitionsDiscoveredByPath = new LinkedHashMap<String, Map<String, String>>();
+            contextDiscoveredByPath = new LinkedHashMap<String, JobContext>();
 
 
             if (status.length == 0) {
                 //        LOG.warn("No partition found genereated by dynamic partitioning in ["
                 //            +loadPath+"] with depth["+jobInfo.getTable().getPartitionKeysSize()
                 //            +"], dynSpec["+dynPathSpec+"]");
-            }else{
-                if ((maxDynamicPartitions != -1) && (status.length > maxDynamicPartitions)){
+            } else {
+                if ((maxDynamicPartitions != -1) && (status.length > maxDynamicPartitions)) {
                     this.partitionsDiscovered = true;
                     throw new HCatException(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS,
-                            "Number of dynamic partitions being created "
-                                    + "exceeds configured max allowable partitions["
-                                    + maxDynamicPartitions
-                                    + "], increase parameter ["
-                                    + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname
-                                    + "] if needed.");
+                        "Number of dynamic partitions being created "
+                            + "exceeds configured max allowable partitions["
+                            + maxDynamicPartitions
+                            + "], increase parameter ["
+                            + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname
+                            + "] if needed.");
                 }
 
-                for (FileStatus st : status){
+                for (FileStatus st : status) {
                     LinkedHashMap<String, String> fullPartSpec = new LinkedHashMap<String, String>();
                     Warehouse.makeSpecFromName(fullPartSpec, st.getPath());
-                    partitionsDiscoveredByPath.put(st.getPath().toString(),fullPartSpec);
-                    JobContext currContext = HCatHadoopShims.Instance.get().createJobContext(context.getConfiguration(),context.getJobID());
+                    partitionsDiscoveredByPath.put(st.getPath().toString(), fullPartSpec);
+                    JobContext currContext = HCatHadoopShims.Instance.get().createJobContext(context.getConfiguration(), context.getJobID());
                     HCatOutputFormat.configureOutputStorageHandler(context, jobInfo, fullPartSpec);
-                    contextDiscoveredByPath.put(st.getPath().toString(),currContext);
+                    contextDiscoveredByPath.put(st.getPath().toString(), currContext);
                 }
             }
 
diff --git a/src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java b/src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java
index 0af8650..22d73da 100644
--- a/src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java
+++ b/src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java
@@ -58,11 +58,11 @@
  */
 class FileOutputFormatContainer extends OutputFormatContainer {
 
-    private static final PathFilter hiddenFileFilter = new PathFilter(){
-      public boolean accept(Path p){
-        String name = p.getName();
-        return !name.startsWith("_") && !name.startsWith(".");
-      }
+    private static final PathFilter hiddenFileFilter = new PathFilter() {
+        public boolean accept(Path p) {
+            String name = p.getName();
+            return !name.startsWith("_") && !name.startsWith(".");
+        }
     };
 
     /**
@@ -80,32 +80,32 @@
         //Configure the output key and value classes.
         // This is required for writing null as key for file based tables.
         context.getConfiguration().set("mapred.output.key.class",
-                NullWritable.class.getName());
+            NullWritable.class.getName());
         String jobInfoString = context.getConfiguration().get(
-                HCatConstants.HCAT_KEY_OUTPUT_INFO);
+            HCatConstants.HCAT_KEY_OUTPUT_INFO);
         OutputJobInfo jobInfo = (OutputJobInfo) HCatUtil
-                .deserialize(jobInfoString);
+            .deserialize(jobInfoString);
         StorerInfo storeInfo = jobInfo.getTableInfo().getStorerInfo();
         HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(
-                context.getConfiguration(), storeInfo);
+            context.getConfiguration(), storeInfo);
         Class<? extends SerDe> serde = storageHandler.getSerDeClass();
         SerDe sd = (SerDe) ReflectionUtils.newInstance(serde,
-                context.getConfiguration());
+            context.getConfiguration());
         context.getConfiguration().set("mapred.output.value.class",
-                sd.getSerializedClass().getName());
+            sd.getSerializedClass().getName());
 
         // When Dynamic partitioning is used, the RecordWriter instance initialized here isn't used. Can use null.
         // (That's because records can't be written until the values of the dynamic partitions are deduced.
         // By that time, a new local instance of RecordWriter, with the correct output-path, will be constructed.)
         RecordWriter<WritableComparable<?>, HCatRecord> rw =
             new FileRecordWriterContainer(
-                HCatBaseOutputFormat.getJobInfo(context).isDynamicPartitioningUsed()?
-                    null:
+                HCatBaseOutputFormat.getJobInfo(context).isDynamicPartitioningUsed() ?
+                    null :
                     getBaseOutputFormat()
-                            .getRecordWriter(null,
-                                                     new JobConf(context.getConfiguration()),
-                                                                         FileOutputFormat.getUniqueName(new JobConf(context.getConfiguration()), "part"),
-                                                                         InternalUtil.createReporter(context)),
+                        .getRecordWriter(null,
+                            new JobConf(context.getConfiguration()),
+                            FileOutputFormat.getUniqueName(new JobConf(context.getConfiguration()), "part"),
+                            InternalUtil.createReporter(context)),
                 context);
         return rw;
     }
@@ -118,9 +118,9 @@
             HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
             client = HCatUtil.getHiveClient(hiveConf);
             handleDuplicatePublish(context,
-                    jobInfo,
-                    client,
-                    new Table(jobInfo.getTableInfo().getTable()));
+                jobInfo,
+                client,
+                new Table(jobInfo.getTableInfo().getTable()));
         } catch (MetaException e) {
             throw new IOException(e);
         } catch (TException e) {
@@ -131,11 +131,11 @@
             HCatUtil.closeHiveClientQuietly(client);
         }
 
-        if(!jobInfo.isDynamicPartitioningUsed()) {
+        if (!jobInfo.isDynamicPartitioningUsed()) {
             JobConf jobConf = new JobConf(context.getConfiguration());
             getBaseOutputFormat().checkOutputSpecs(null, jobConf);
             //checkoutputspecs might've set some properties we need to have context reflect that
-            HCatUtil.copyConf(jobConf,context.getConfiguration());
+            HCatUtil.copyConf(jobConf, context.getConfiguration());
         }
     }
 
@@ -144,9 +144,9 @@
         //this needs to be manually set, under normal circumstances MR Task does this
         setWorkOutputPath(context);
         return new FileOutputCommitterContainer(context,
-               HCatBaseOutputFormat.getJobInfo(context).isDynamicPartitioningUsed()?
-                       null:
-                       new JobConf(context.getConfiguration()).getOutputCommitter());
+            HCatBaseOutputFormat.getJobInfo(context).isDynamicPartitioningUsed() ?
+                null :
+                new JobConf(context.getConfiguration()).getOutputCommitter());
     }
 
     /**
@@ -162,7 +162,7 @@
      * @throws org.apache.thrift.TException
      */
     private static void handleDuplicatePublish(JobContext context, OutputJobInfo outputInfo,
-      HiveMetaStoreClient client, Table table) throws IOException, MetaException, TException, NoSuchObjectException {
+                                               HiveMetaStoreClient client, Table table) throws IOException, MetaException, TException, NoSuchObjectException {
 
         /*
         * For fully specified ptn, follow strict checks for existence of partitions in metadata
@@ -173,32 +173,32 @@
         *    there are a large number of partitions that match the partial specifications
         */
 
-        if( table.getPartitionKeys().size() > 0 ) {
-            if (!outputInfo.isDynamicPartitioningUsed()){
+        if (table.getPartitionKeys().size() > 0) {
+            if (!outputInfo.isDynamicPartitioningUsed()) {
                 List<String> partitionValues = getPartitionValueList(
-                        table, outputInfo.getPartitionValues());
+                    table, outputInfo.getPartitionValues());
                 // fully-specified partition
                 List<String> currentParts = client.listPartitionNames(outputInfo.getDatabaseName(),
-                        outputInfo.getTableName(), partitionValues, (short) 1);
+                    outputInfo.getTableName(), partitionValues, (short) 1);
 
-                if( currentParts.size() > 0 ) {
+                if (currentParts.size() > 0) {
                     throw new HCatException(ErrorType.ERROR_DUPLICATE_PARTITION);
                 }
             }
         } else {
             List<String> partitionValues = getPartitionValueList(
-                    table, outputInfo.getPartitionValues());
+                table, outputInfo.getPartitionValues());
             // non-partitioned table
 
             Path tablePath = new Path(table.getTTable().getSd().getLocation());
             FileSystem fs = tablePath.getFileSystem(context.getConfiguration());
 
-            if ( fs.exists(tablePath) ) {
+            if (fs.exists(tablePath)) {
                 FileStatus[] status = fs.globStatus(new Path(tablePath, "*"), hiddenFileFilter);
 
-                if( status.length > 0 ) {
+                if (status.length > 0) {
                     throw new HCatException(ErrorType.ERROR_NON_EMPTY_TABLE,
-                            table.getDbName() + "." + table.getTableName());
+                        table.getDbName() + "." + table.getTableName());
                 }
             }
         }
@@ -213,22 +213,22 @@
      */
     static List<String> getPartitionValueList(Table table, Map<String, String> valueMap) throws IOException {
 
-        if( valueMap.size() != table.getPartitionKeys().size() ) {
+        if (valueMap.size() != table.getPartitionKeys().size()) {
             throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
-                    "Table "
-                            + table.getTableName() + " has " +
-                            table.getPartitionKeys().size() + " partition keys, got "+
-                            valueMap.size());
+                "Table "
+                    + table.getTableName() + " has " +
+                    table.getPartitionKeys().size() + " partition keys, got " +
+                    valueMap.size());
         }
 
         List<String> values = new ArrayList<String>();
 
-        for(FieldSchema schema : table.getPartitionKeys()) {
+        for (FieldSchema schema : table.getPartitionKeys()) {
             String value = valueMap.get(schema.getName().toLowerCase());
 
-            if( value == null ) {
+            if (value == null) {
                 throw new HCatException(ErrorType.ERROR_MISSING_PARTITION_KEY,
-                        "Key " + schema.getName() + " of table " + table.getTableName());
+                    "Key " + schema.getName() + " of table " + table.getTableName());
             }
 
             values.add(value);
@@ -241,8 +241,8 @@
         String outputPath = context.getConfiguration().get("mapred.output.dir");
         //we need to do this to get the task path and set it for mapred implementation
         //since it can't be done automatically because of mapreduce->mapred abstraction
-        if(outputPath != null)
+        if (outputPath != null)
             context.getConfiguration().set("mapred.work.output.dir",
-                    new FileOutputCommitter(new Path(outputPath), context).getWorkPath().toString());
+                new FileOutputCommitter(new Path(outputPath), context).getWorkPath().toString());
     }
 }
diff --git a/src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java b/src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java
index 9e21316..b69e2a7 100644
--- a/src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java
+++ b/src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java
@@ -80,17 +80,17 @@
      */
     public FileRecordWriterContainer(org.apache.hadoop.mapred.RecordWriter<? super WritableComparable<?>, ? super Writable> baseWriter,
                                      TaskAttemptContext context) throws IOException, InterruptedException {
-        super(context,baseWriter);
+        super(context, baseWriter);
         this.context = context;
         jobInfo = HCatOutputFormat.getJobInfo(context);
 
         storageHandler = HCatUtil.getStorageHandler(context.getConfiguration(), jobInfo.getTableInfo().getStorerInfo());
-        serDe = ReflectionUtils.newInstance(storageHandler.getSerDeClass(),context.getConfiguration());
+        serDe = ReflectionUtils.newInstance(storageHandler.getSerDeClass(), context.getConfiguration());
         objectInspector = InternalUtil.createStructObjectInspector(jobInfo.getOutputSchema());
         try {
             InternalUtil.initializeOutputSerDe(serDe, context.getConfiguration(), jobInfo);
         } catch (SerDeException e) {
-            throw new IOException("Failed to inialize SerDe",e);
+            throw new IOException("Failed to inialize SerDe", e);
         }
 
         // If partition columns occur in data, we want to remove them.
@@ -99,9 +99,9 @@
         dynamicPartCols = jobInfo.getPosOfDynPartCols();
         maxDynamicPartitions = jobInfo.getMaxDynamicPartitions();
 
-        if((partColsToDel == null) || (dynamicPartitioningUsed && (dynamicPartCols == null))){
+        if ((partColsToDel == null) || (dynamicPartitioningUsed && (dynamicPartCols == null))) {
             throw new HCatException("It seems that setSchema() is not called on " +
-                    "HCatOutputFormat. Please make sure that method is called.");
+                "HCatOutputFormat. Please make sure that method is called.");
         }
 
 
@@ -112,10 +112,9 @@
             this.dynamicContexts = null;
             this.dynamicObjectInspectors = null;
             this.dynamicOutputJobInfo = null;
-        }
-        else {
-            this.baseDynamicSerDe = new HashMap<String,SerDe>();
-            this.baseDynamicWriters = new HashMap<String,org.apache.hadoop.mapred.RecordWriter<? super WritableComparable<?>, ? super Writable>>();
+        } else {
+            this.baseDynamicSerDe = new HashMap<String, SerDe>();
+            this.baseDynamicWriters = new HashMap<String, org.apache.hadoop.mapred.RecordWriter<? super WritableComparable<?>, ? super Writable>>();
             this.baseDynamicCommitters = new HashMap<String, org.apache.hadoop.mapred.OutputCommitter>();
             this.dynamicContexts = new HashMap<String, org.apache.hadoop.mapred.TaskAttemptContext>();
             this.dynamicObjectInspectors = new HashMap<String, ObjectInspector>();
@@ -132,17 +131,17 @@
 
     @Override
     public void close(TaskAttemptContext context) throws IOException,
-            InterruptedException {
+        InterruptedException {
         Reporter reporter = InternalUtil.createReporter(context);
-        if (dynamicPartitioningUsed){
-            for (org.apache.hadoop.mapred.RecordWriter<? super WritableComparable<?>, ? super Writable> bwriter : baseDynamicWriters.values()){
+        if (dynamicPartitioningUsed) {
+            for (org.apache.hadoop.mapred.RecordWriter<? super WritableComparable<?>, ? super Writable> bwriter : baseDynamicWriters.values()) {
                 //We are in RecordWriter.close() make sense that the context would be TaskInputOutput
                 bwriter.close(reporter);
             }
-            for(Map.Entry<String,org.apache.hadoop.mapred.OutputCommitter>entry : baseDynamicCommitters.entrySet()) {
+            for (Map.Entry<String, org.apache.hadoop.mapred.OutputCommitter> entry : baseDynamicCommitters.entrySet()) {
                 org.apache.hadoop.mapred.TaskAttemptContext currContext = dynamicContexts.get(entry.getKey());
                 OutputCommitter baseOutputCommitter = entry.getValue();
-                if (baseOutputCommitter.needsTaskCommit(currContext)){
+                if (baseOutputCommitter.needsTaskCommit(currContext)) {
                     baseOutputCommitter.commitTask(currContext);
                 }
                 org.apache.hadoop.mapred.JobContext currJobContext = HCatMapRedUtil.createJobContext(currContext);
@@ -155,93 +154,92 @@
 
     @Override
     public void write(WritableComparable<?> key, HCatRecord value) throws IOException,
-            InterruptedException {
+        InterruptedException {
 
         org.apache.hadoop.mapred.RecordWriter localWriter;
         ObjectInspector localObjectInspector;
         SerDe localSerDe;
         OutputJobInfo localJobInfo = null;
 
-        if (dynamicPartitioningUsed){
+        if (dynamicPartitioningUsed) {
             // calculate which writer to use from the remaining values - this needs to be done before we delete cols
             List<String> dynamicPartValues = new ArrayList<String>();
-            for (Integer colToAppend :  dynamicPartCols){
+            for (Integer colToAppend : dynamicPartCols) {
                 dynamicPartValues.add(value.get(colToAppend).toString());
             }
 
             String dynKey = dynamicPartValues.toString();
-            if (!baseDynamicWriters.containsKey(dynKey)){
-                if ((maxDynamicPartitions != -1) && (baseDynamicWriters.size() > maxDynamicPartitions)){
+            if (!baseDynamicWriters.containsKey(dynKey)) {
+                if ((maxDynamicPartitions != -1) && (baseDynamicWriters.size() > maxDynamicPartitions)) {
                     throw new HCatException(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS,
-                            "Number of dynamic partitions being created "
-                                    + "exceeds configured max allowable partitions["
-                                    + maxDynamicPartitions
-                                    + "], increase parameter ["
-                                    + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname
-                                    + "] if needed.");
+                        "Number of dynamic partitions being created "
+                            + "exceeds configured max allowable partitions["
+                            + maxDynamicPartitions
+                            + "], increase parameter ["
+                            + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname
+                            + "] if needed.");
                 }
 
                 org.apache.hadoop.mapred.TaskAttemptContext currTaskContext = HCatMapRedUtil.createTaskAttemptContext(context);
                 configureDynamicStorageHandler(currTaskContext, dynamicPartValues);
-                localJobInfo= HCatBaseOutputFormat.getJobInfo(currTaskContext);
+                localJobInfo = HCatBaseOutputFormat.getJobInfo(currTaskContext);
 
                 //setup serDe
                 SerDe currSerDe = ReflectionUtils.newInstance(storageHandler.getSerDeClass(), currTaskContext.getJobConf());
                 try {
                     InternalUtil.initializeOutputSerDe(currSerDe, currTaskContext.getConfiguration(), localJobInfo);
                 } catch (SerDeException e) {
-                    throw new IOException("Failed to initialize SerDe",e);
+                    throw new IOException("Failed to initialize SerDe", e);
                 }
 
                 //create base OutputFormat
                 org.apache.hadoop.mapred.OutputFormat baseOF =
-                        ReflectionUtils.newInstance(storageHandler.getOutputFormatClass(), currTaskContext.getJobConf());
+                    ReflectionUtils.newInstance(storageHandler.getOutputFormatClass(), currTaskContext.getJobConf());
                 //check outputSpecs
-                baseOF.checkOutputSpecs(null,currTaskContext.getJobConf());
+                baseOF.checkOutputSpecs(null, currTaskContext.getJobConf());
                 //get Output Committer
-                org.apache.hadoop.mapred.OutputCommitter baseOutputCommitter =  currTaskContext.getJobConf().getOutputCommitter();
+                org.apache.hadoop.mapred.OutputCommitter baseOutputCommitter = currTaskContext.getJobConf().getOutputCommitter();
                 //create currJobContext the latest so it gets all the config changes
                 org.apache.hadoop.mapred.JobContext currJobContext = HCatMapRedUtil.createJobContext(currTaskContext);
                 //setupJob()
                 baseOutputCommitter.setupJob(currJobContext);
                 //recreate to refresh jobConf of currTask context
                 currTaskContext =
-                        HCatMapRedUtil.createTaskAttemptContext(currJobContext.getJobConf(),
-                                                                                        currTaskContext.getTaskAttemptID(),
-                                                                                        currTaskContext.getProgressible());
+                    HCatMapRedUtil.createTaskAttemptContext(currJobContext.getJobConf(),
+                        currTaskContext.getTaskAttemptID(),
+                        currTaskContext.getProgressible());
                 //set temp location
                 currTaskContext.getConfiguration().set("mapred.work.output.dir",
-                                new FileOutputCommitter(new Path(localJobInfo.getLocation()),currTaskContext).getWorkPath().toString());
+                    new FileOutputCommitter(new Path(localJobInfo.getLocation()), currTaskContext).getWorkPath().toString());
                 //setupTask()
                 baseOutputCommitter.setupTask(currTaskContext);
 
                 org.apache.hadoop.mapred.RecordWriter baseRecordWriter =
-                        baseOF.getRecordWriter(null,
-                                                            currTaskContext.getJobConf(),
-                                                            FileOutputFormat.getUniqueFile(currTaskContext, "part", ""),
-                                                            InternalUtil.createReporter(currTaskContext));
+                    baseOF.getRecordWriter(null,
+                        currTaskContext.getJobConf(),
+                        FileOutputFormat.getUniqueFile(currTaskContext, "part", ""),
+                        InternalUtil.createReporter(currTaskContext));
 
                 baseDynamicWriters.put(dynKey, baseRecordWriter);
-                baseDynamicSerDe.put(dynKey,currSerDe);
-                baseDynamicCommitters.put(dynKey,baseOutputCommitter);
-                dynamicContexts.put(dynKey,currTaskContext);
-                dynamicObjectInspectors.put(dynKey,InternalUtil.createStructObjectInspector(jobInfo.getOutputSchema()));
+                baseDynamicSerDe.put(dynKey, currSerDe);
+                baseDynamicCommitters.put(dynKey, baseOutputCommitter);
+                dynamicContexts.put(dynKey, currTaskContext);
+                dynamicObjectInspectors.put(dynKey, InternalUtil.createStructObjectInspector(jobInfo.getOutputSchema()));
                 dynamicOutputJobInfo.put(dynKey, HCatOutputFormat.getJobInfo(dynamicContexts.get(dynKey)));
             }
-            
+
             localJobInfo = dynamicOutputJobInfo.get(dynKey);
             localWriter = baseDynamicWriters.get(dynKey);
             localSerDe = baseDynamicSerDe.get(dynKey);
             localObjectInspector = dynamicObjectInspectors.get(dynKey);
-        }
-        else{
+        } else {
             localJobInfo = jobInfo;
             localWriter = getBaseRecordWriter();
             localSerDe = serDe;
             localObjectInspector = objectInspector;
         }
 
-        for(Integer colToDel : partColsToDel){
+        for (Integer colToDel : partColsToDel) {
             value.remove(colToDel);
         }
 
@@ -250,7 +248,7 @@
         try {
             localWriter.write(NullWritable.get(), localSerDe.serialize(value.getAll(), localObjectInspector));
         } catch (SerDeException e) {
-            throw new IOException("Failed to serialize object",e);
+            throw new IOException("Failed to serialize object", e);
         }
     }
 
diff --git a/src/java/org/apache/hcatalog/mapreduce/FosterStorageHandler.java b/src/java/org/apache/hcatalog/mapreduce/FosterStorageHandler.java
index 27a6ed6..3f368d8 100644
--- a/src/java/org/apache/hcatalog/mapreduce/FosterStorageHandler.java
+++ b/src/java/org/apache/hcatalog/mapreduce/FosterStorageHandler.java
@@ -52,18 +52,19 @@
     /** The directory under which data is initially written for a non partitioned table */
     protected static final String TEMP_DIR_NAME = "_TEMP";
 
-   private Class<? extends InputFormat> ifClass;
-   private Class<? extends OutputFormat> ofClass;
-   private Class<? extends SerDe> serDeClass;
+    private Class<? extends InputFormat> ifClass;
+    private Class<? extends OutputFormat> ofClass;
+    private Class<? extends SerDe> serDeClass;
 
     public FosterStorageHandler(String ifName, String ofName, String serdeName) throws ClassNotFoundException {
         this((Class<? extends InputFormat>) Class.forName(ifName),
-                (Class<? extends OutputFormat>) Class.forName(ofName),
-                (Class<? extends SerDe>) Class.forName(serdeName));
+            (Class<? extends OutputFormat>) Class.forName(ofName),
+            (Class<? extends SerDe>) Class.forName(serdeName));
     }
+
     public FosterStorageHandler(Class<? extends InputFormat> ifClass,
-                                               Class<? extends OutputFormat> ofClass,
-                                               Class<? extends SerDe> serDeClass) {
+                                Class<? extends OutputFormat> ofClass,
+                                Class<? extends SerDe> serDeClass) {
         this.ifClass = ifClass;
         this.ofClass = ofClass;
         this.serDeClass = serDeClass;
@@ -97,36 +98,35 @@
 
     @Override
     public void configureOutputJobProperties(TableDesc tableDesc,
-                                      Map<String, String> jobProperties) {
+                                             Map<String, String> jobProperties) {
         try {
             OutputJobInfo jobInfo = (OutputJobInfo)
-              HCatUtil.deserialize(tableDesc.getJobProperties().get(
-                                      HCatConstants.HCAT_KEY_OUTPUT_INFO));
+                HCatUtil.deserialize(tableDesc.getJobProperties().get(
+                    HCatConstants.HCAT_KEY_OUTPUT_INFO));
             String parentPath = jobInfo.getTableInfo().getTableLocation();
             String dynHash = tableDesc.getJobProperties().get(
-                                      HCatConstants.HCAT_DYNAMIC_PTN_JOBID);
+                HCatConstants.HCAT_DYNAMIC_PTN_JOBID);
 
             // For dynamic partitioned writes without all keyvalues specified,
             // we create a temp dir for the associated write job
-            if (dynHash != null){
+            if (dynHash != null) {
                 parentPath = new Path(parentPath,
-                                      DYNTEMP_DIR_NAME+dynHash).toString();
+                    DYNTEMP_DIR_NAME + dynHash).toString();
             }
 
             String outputLocation;
 
             // For non-partitioned tables, we send them to the temp dir
-            if(dynHash == null && jobInfo.getPartitionValues().size() == 0) {
+            if (dynHash == null && jobInfo.getPartitionValues().size() == 0) {
                 outputLocation = TEMP_DIR_NAME;
-            }
-            else {
+            } else {
                 List<String> cols = new ArrayList<String>();
                 List<String> values = new ArrayList<String>();
 
                 //Get the output location in the order partition keys are defined for the table.
-                for(String name:
+                for (String name :
                     jobInfo.getTableInfo().
-                    getPartitionColumns().getFieldNames()) {
+                        getPartitionColumns().getFieldNames()) {
                     String value = jobInfo.getPartitionValues().get(name);
                     cols.add(name);
                     values.add(value);
@@ -134,29 +134,29 @@
                 outputLocation = FileUtils.makePartName(cols, values);
             }
 
-            jobInfo.setLocation(new Path(parentPath,outputLocation).toString());
+            jobInfo.setLocation(new Path(parentPath, outputLocation).toString());
 
             //only set output dir if partition is fully materialized
-            if(jobInfo.getPartitionValues().size()
+            if (jobInfo.getPartitionValues().size()
                 == jobInfo.getTableInfo().getPartitionColumns().size()) {
                 jobProperties.put("mapred.output.dir", jobInfo.getLocation());
             }
 
             //TODO find a better home for this, RCFile specifc
             jobProperties.put(RCFile.COLUMN_NUMBER_CONF_STR,
-                              Integer.toOctalString(
-                                jobInfo.getOutputSchema().getFields().size()));
+                Integer.toOctalString(
+                    jobInfo.getOutputSchema().getFields().size()));
             jobProperties.put(HCatConstants.HCAT_KEY_OUTPUT_INFO,
-                              HCatUtil.serialize(jobInfo));
+                HCatUtil.serialize(jobInfo));
         } catch (IOException e) {
-            throw new IllegalStateException("Failed to set output path",e);
+            throw new IllegalStateException("Failed to set output path", e);
         }
 
     }
 
     @Override
     OutputFormatContainer getOutputFormatContainer(
-              org.apache.hadoop.mapred.OutputFormat outputFormat) {
+        org.apache.hadoop.mapred.OutputFormat outputFormat) {
         return new FileOutputFormatContainer(outputFormat);
     }
 
@@ -172,7 +172,7 @@
 
     @Override
     public HiveAuthorizationProvider getAuthorizationProvider()
-      throws HiveException {
+        throws HiveException {
         return new DefaultHiveAuthorizationProvider();
     }
 
diff --git a/src/java/org/apache/hcatalog/mapreduce/HCatBaseInputFormat.java b/src/java/org/apache/hcatalog/mapreduce/HCatBaseInputFormat.java
index b484a02..6f77a23 100644
--- a/src/java/org/apache/hcatalog/mapreduce/HCatBaseInputFormat.java
+++ b/src/java/org/apache/hcatalog/mapreduce/HCatBaseInputFormat.java
@@ -46,272 +46,271 @@
 import org.apache.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hcatalog.data.schema.HCatSchema;
 
-public abstract class HCatBaseInputFormat 
-  extends InputFormat<WritableComparable, HCatRecord> {
-  
-  /**
-   * get the schema for the HCatRecord data returned by HCatInputFormat.
-   * 
-   * @param context the jobContext
-   * @throws IllegalArgumentException
-   */
-  private Class<? extends InputFormat> inputFileFormatClass;
+public abstract class HCatBaseInputFormat
+    extends InputFormat<WritableComparable, HCatRecord> {
 
-  // TODO needs to go in InitializeInput? as part of InputJobInfo
-  public static HCatSchema getOutputSchema(JobContext context) 
-    throws IOException {
-    String os = context.getConfiguration().get(
-                                HCatConstants.HCAT_KEY_OUTPUT_SCHEMA);
-    if (os == null) {
-      return getTableSchema(context);
-    } else {
-      return (HCatSchema) HCatUtil.deserialize(os);
-    }
-  }
-  
-  /**
-   * Set the schema for the HCatRecord data returned by HCatInputFormat.
-   * @param job the job object
-   * @param hcatSchema the schema to use as the consolidated schema
-   */
-  public static void setOutputSchema(Job job,HCatSchema hcatSchema) 
-    throws IOException {
-    job.getConfiguration().set(HCatConstants.HCAT_KEY_OUTPUT_SCHEMA, 
-                               HCatUtil.serialize(hcatSchema));
-  }
+    /**
+     * get the schema for the HCatRecord data returned by HCatInputFormat.
+     *
+     * @param context the jobContext
+     * @throws IllegalArgumentException
+     */
+    private Class<? extends InputFormat> inputFileFormatClass;
 
-  protected static
-    org.apache.hadoop.mapred.InputFormat<WritableComparable, Writable>
-    getMapRedInputFormat (JobConf job, Class inputFormatClass) throws IOException {
-      return (
-          org.apache.hadoop.mapred.InputFormat<WritableComparable, Writable>) 
-        ReflectionUtils.newInstance(inputFormatClass, job);
-  }
-
-  /**
-   * Logically split the set of input files for the job. Returns the
-   * underlying InputFormat's splits
-   * @param jobContext the job context object
-   * @return the splits, an HCatInputSplit wrapper over the storage
-   *         handler InputSplits
-   * @throws IOException or InterruptedException
-   */
-  @Override
-  public List<InputSplit> getSplits(JobContext jobContext)
-  throws IOException, InterruptedException {
-
-    //Get the job info from the configuration,
-    //throws exception if not initialized
-    InputJobInfo inputJobInfo;
-    try {
-      inputJobInfo = getJobInfo(jobContext);
-    } catch (Exception e) {
-      throw new IOException(e);
+    // TODO needs to go in InitializeInput? as part of InputJobInfo
+    public static HCatSchema getOutputSchema(JobContext context)
+        throws IOException {
+        String os = context.getConfiguration().get(
+            HCatConstants.HCAT_KEY_OUTPUT_SCHEMA);
+        if (os == null) {
+            return getTableSchema(context);
+        } else {
+            return (HCatSchema) HCatUtil.deserialize(os);
+        }
     }
 
-    List<InputSplit> splits = new ArrayList<InputSplit>();
-    List<PartInfo> partitionInfoList = inputJobInfo.getPartitions();
-    if(partitionInfoList == null ) {
-      //No partitions match the specified partition filter
-      return splits;
+    /**
+     * Set the schema for the HCatRecord data returned by HCatInputFormat.
+     * @param job the job object
+     * @param hcatSchema the schema to use as the consolidated schema
+     */
+    public static void setOutputSchema(Job job, HCatSchema hcatSchema)
+        throws IOException {
+        job.getConfiguration().set(HCatConstants.HCAT_KEY_OUTPUT_SCHEMA,
+            HCatUtil.serialize(hcatSchema));
     }
 
-    HCatStorageHandler storageHandler;
-    JobConf jobConf;
-    Configuration conf = jobContext.getConfiguration();
-    //For each matching partition, call getSplits on the underlying InputFormat
-    for(PartInfo partitionInfo : partitionInfoList) {
-      jobConf = HCatUtil.getJobConfFromContext(jobContext);
-      setInputPath(jobConf, partitionInfo.getLocation());
-      Map<String,String> jobProperties = partitionInfo.getJobProperties();
-
-      HCatSchema allCols = new HCatSchema(new LinkedList<HCatFieldSchema>());
-      for(HCatFieldSchema field: 
-          inputJobInfo.getTableInfo().getDataColumns().getFields())
-          allCols.append(field);
-      for(HCatFieldSchema field: 
-          inputJobInfo.getTableInfo().getPartitionColumns().getFields())
-          allCols.append(field);
-
-      HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);
-
-      storageHandler = HCatUtil.getStorageHandler(
-          jobConf, partitionInfo);
-
-      //Get the input format
-      Class inputFormatClass = storageHandler.getInputFormatClass();
-      org.apache.hadoop.mapred.InputFormat inputFormat = 
-                            getMapRedInputFormat(jobConf, inputFormatClass);
-
-      //Call getSplit on the InputFormat, create an
-      //HCatSplit for each underlying split
-      //NumSplits is 0 for our purposes
-      org.apache.hadoop.mapred.InputSplit[] baseSplits = 
-        inputFormat.getSplits(jobConf, 0);
-
-      for(org.apache.hadoop.mapred.InputSplit split : baseSplits) {
-        splits.add(new HCatSplit(
-            partitionInfo,
-            split,allCols));
-      }
+    protected static org.apache.hadoop.mapred.InputFormat<WritableComparable, Writable>
+    getMapRedInputFormat(JobConf job, Class inputFormatClass) throws IOException {
+        return (
+            org.apache.hadoop.mapred.InputFormat<WritableComparable, Writable>)
+            ReflectionUtils.newInstance(inputFormatClass, job);
     }
 
-    return splits;
-  }
+    /**
+     * Logically split the set of input files for the job. Returns the
+     * underlying InputFormat's splits
+     * @param jobContext the job context object
+     * @return the splits, an HCatInputSplit wrapper over the storage
+     *         handler InputSplits
+     * @throws IOException or InterruptedException
+     */
+    @Override
+    public List<InputSplit> getSplits(JobContext jobContext)
+        throws IOException, InterruptedException {
 
-  /**
-   * Create the RecordReader for the given InputSplit. Returns the underlying
-   * RecordReader if the required operations are supported and schema matches
-   * with HCatTable schema. Returns an HCatRecordReader if operations need to
-   * be implemented in HCat.
-   * @param split the split
-   * @param taskContext the task attempt context
-   * @return the record reader instance, either an HCatRecordReader(later) or
-   *         the underlying storage handler's RecordReader
-   * @throws IOException or InterruptedException
-   */
-  @Override
-  public RecordReader<WritableComparable, HCatRecord> 
-  createRecordReader(InputSplit split,
-      TaskAttemptContext taskContext) throws IOException, InterruptedException {
+        //Get the job info from the configuration,
+        //throws exception if not initialized
+        InputJobInfo inputJobInfo;
+        try {
+            inputJobInfo = getJobInfo(jobContext);
+        } catch (Exception e) {
+            throw new IOException(e);
+        }
 
-    HCatSplit hcatSplit = InternalUtil.castToHCatSplit(split);
-    PartInfo partitionInfo = hcatSplit.getPartitionInfo();
-    JobContext jobContext = taskContext;
+        List<InputSplit> splits = new ArrayList<InputSplit>();
+        List<PartInfo> partitionInfoList = inputJobInfo.getPartitions();
+        if (partitionInfoList == null) {
+            //No partitions match the specified partition filter
+            return splits;
+        }
 
-    HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(
-        jobContext.getConfiguration(), partitionInfo);
-    
-    JobConf jobConf = HCatUtil.getJobConfFromContext(jobContext);
-    Map<String, String> jobProperties = partitionInfo.getJobProperties();
-    HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);
+        HCatStorageHandler storageHandler;
+        JobConf jobConf;
+        Configuration conf = jobContext.getConfiguration();
+        //For each matching partition, call getSplits on the underlying InputFormat
+        for (PartInfo partitionInfo : partitionInfoList) {
+            jobConf = HCatUtil.getJobConfFromContext(jobContext);
+            setInputPath(jobConf, partitionInfo.getLocation());
+            Map<String, String> jobProperties = partitionInfo.getJobProperties();
 
-    Map<String,String> valuesNotInDataCols = getColValsNotInDataColumns(
-        getOutputSchema(jobContext),partitionInfo
+            HCatSchema allCols = new HCatSchema(new LinkedList<HCatFieldSchema>());
+            for (HCatFieldSchema field :
+                inputJobInfo.getTableInfo().getDataColumns().getFields())
+                allCols.append(field);
+            for (HCatFieldSchema field :
+                inputJobInfo.getTableInfo().getPartitionColumns().getFields())
+                allCols.append(field);
+
+            HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);
+
+            storageHandler = HCatUtil.getStorageHandler(
+                jobConf, partitionInfo);
+
+            //Get the input format
+            Class inputFormatClass = storageHandler.getInputFormatClass();
+            org.apache.hadoop.mapred.InputFormat inputFormat =
+                getMapRedInputFormat(jobConf, inputFormatClass);
+
+            //Call getSplit on the InputFormat, create an
+            //HCatSplit for each underlying split
+            //NumSplits is 0 for our purposes
+            org.apache.hadoop.mapred.InputSplit[] baseSplits =
+                inputFormat.getSplits(jobConf, 0);
+
+            for (org.apache.hadoop.mapred.InputSplit split : baseSplits) {
+                splits.add(new HCatSplit(
+                    partitionInfo,
+                    split, allCols));
+            }
+        }
+
+        return splits;
+    }
+
+    /**
+     * Create the RecordReader for the given InputSplit. Returns the underlying
+     * RecordReader if the required operations are supported and schema matches
+     * with HCatTable schema. Returns an HCatRecordReader if operations need to
+     * be implemented in HCat.
+     * @param split the split
+     * @param taskContext the task attempt context
+     * @return the record reader instance, either an HCatRecordReader(later) or
+     *         the underlying storage handler's RecordReader
+     * @throws IOException or InterruptedException
+     */
+    @Override
+    public RecordReader<WritableComparable, HCatRecord>
+    createRecordReader(InputSplit split,
+                       TaskAttemptContext taskContext) throws IOException, InterruptedException {
+
+        HCatSplit hcatSplit = InternalUtil.castToHCatSplit(split);
+        PartInfo partitionInfo = hcatSplit.getPartitionInfo();
+        JobContext jobContext = taskContext;
+
+        HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(
+            jobContext.getConfiguration(), partitionInfo);
+
+        JobConf jobConf = HCatUtil.getJobConfFromContext(jobContext);
+        Map<String, String> jobProperties = partitionInfo.getJobProperties();
+        HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);
+
+        Map<String, String> valuesNotInDataCols = getColValsNotInDataColumns(
+            getOutputSchema(jobContext), partitionInfo
         );
 
-    return new HCatRecordReader(storageHandler, valuesNotInDataCols);
-  }
-
-
-  /**
-   * gets values for fields requested by output schema which will not be in the data
-   */
-  private static Map<String,String> getColValsNotInDataColumns(HCatSchema outputSchema,
-      PartInfo partInfo){
-    HCatSchema dataSchema = partInfo.getPartitionSchema();
-    Map<String,String> vals = new HashMap<String,String>();
-    for (String fieldName : outputSchema.getFieldNames()){
-      if (dataSchema.getPosition(fieldName) == null){
-        // this entry of output is not present in the output schema
-        // so, we first check the table schema to see if it is a part col
-        
-        if (partInfo.getPartitionValues().containsKey(fieldName)){
-          vals.put(fieldName, partInfo.getPartitionValues().get(fieldName));
-        } else {
-          vals.put(fieldName, null);
-        }
-      }
-    }
-    return vals;
-  }
-
-  /**
-   * Gets the HCatTable schema for the table specified in the HCatInputFormat.setInput call
-   * on the specified job context. This information is available only after HCatInputFormat.setInput
-   * has been called for a JobContext.
-   * @param context the context
-   * @return the table schema
-   * @throws IOException if HCatInputFormat.setInput has not been called 
-   *                     for the current context
-   */
-  public static HCatSchema getTableSchema(JobContext context) 
-  throws IOException {
-    InputJobInfo inputJobInfo = getJobInfo(context);
-      HCatSchema allCols = new HCatSchema(new LinkedList<HCatFieldSchema>());
-      for(HCatFieldSchema field: 
-          inputJobInfo.getTableInfo().getDataColumns().getFields())
-          allCols.append(field);
-      for(HCatFieldSchema field: 
-          inputJobInfo.getTableInfo().getPartitionColumns().getFields())
-          allCols.append(field);
-    return allCols;
-  }
-
-  /**
-   * Gets the InputJobInfo object by reading the Configuration and deserializing
-   * the string. If InputJobInfo is not present in the configuration, throws an
-   * exception since that means HCatInputFormat.setInput has not been called.
-   * @param jobContext the job context
-   * @return the InputJobInfo object
-   * @throws IOException the exception
-   */
-  private static InputJobInfo getJobInfo(JobContext jobContext) 
-    throws IOException {
-    String jobString = jobContext.getConfiguration().get(
-                                  HCatConstants.HCAT_KEY_JOB_INFO);
-    if( jobString == null ) {
-      throw new IOException("job information not found in JobContext."
-         + " HCatInputFormat.setInput() not called?");
+        return new HCatRecordReader(storageHandler, valuesNotInDataCols);
     }
 
-    return (InputJobInfo) HCatUtil.deserialize(jobString);
-  }
 
-  private void setInputPath(JobConf jobConf, String location) 
-  throws IOException{
+    /**
+     * Gets values for fields requested by the output schema which will not be in the data
+     */
+    private static Map<String, String> getColValsNotInDataColumns(HCatSchema outputSchema,
+                                                                  PartInfo partInfo) {
+        HCatSchema dataSchema = partInfo.getPartitionSchema();
+        Map<String, String> vals = new HashMap<String, String>();
+        for (String fieldName : outputSchema.getFieldNames()) {
+            if (dataSchema.getPosition(fieldName) == null) {
+                // this entry of the output schema is not present in the data schema,
+                // so check the partition values to see if it is a partition column
 
-    // ideally we should just call FileInputFormat.setInputPaths() here - but
-    // that won't work since FileInputFormat.setInputPaths() needs
-    // a Job object instead of a JobContext which we are handed here
-
-    int length = location.length();
-    int curlyOpen = 0;
-    int pathStart = 0;
-    boolean globPattern = false;
-    List<String> pathStrings = new ArrayList<String>();
-
-    for (int i=0; i<length; i++) {
-      char ch = location.charAt(i);
-      switch(ch) {
-      case '{' : {
-        curlyOpen++;
-        if (!globPattern) {
-          globPattern = true;
+                if (partInfo.getPartitionValues().containsKey(fieldName)) {
+                    vals.put(fieldName, partInfo.getPartitionValues().get(fieldName));
+                } else {
+                    vals.put(fieldName, null);
+                }
+            }
         }
-        break;
-      }
-      case '}' : {
-        curlyOpen--;
-        if (curlyOpen == 0 && globPattern) {
-          globPattern = false;
-        }
-        break;
-      }
-      case ',' : {
-        if (!globPattern) {
-          pathStrings.add(location.substring(pathStart, i));
-          pathStart = i + 1 ;
-        }
-        break;
-      }
-      }
-    }
-    pathStrings.add(location.substring(pathStart, length));
-
-    Path[] paths = StringUtils.stringToPath(pathStrings.toArray(new String[0]));
-
-    FileSystem fs = FileSystem.get(jobConf);
-    Path path = paths[0].makeQualified(fs);
-    StringBuilder str = new StringBuilder(StringUtils.escapeString(
-                                                          path.toString()));
-    for(int i = 1; i < paths.length;i++) {
-      str.append(StringUtils.COMMA_STR);
-      path = paths[i].makeQualified(fs);
-      str.append(StringUtils.escapeString(path.toString()));
+        return vals;
     }
 
-    jobConf.set("mapred.input.dir", str.toString());
-  }
+    /**
+     * Gets the HCatTable schema for the table specified in the HCatInputFormat.setInput call
+     * on the specified job context. This information is available only after HCatInputFormat.setInput
+     * has been called for a JobContext.
+     * @param context the context
+     * @return the table schema
+     * @throws IOException if HCatInputFormat.setInput has not been called
+     *                     for the current context
+     */
+    public static HCatSchema getTableSchema(JobContext context)
+        throws IOException {
+        InputJobInfo inputJobInfo = getJobInfo(context);
+        HCatSchema allCols = new HCatSchema(new LinkedList<HCatFieldSchema>());
+        for (HCatFieldSchema field :
+            inputJobInfo.getTableInfo().getDataColumns().getFields())
+            allCols.append(field);
+        for (HCatFieldSchema field :
+            inputJobInfo.getTableInfo().getPartitionColumns().getFields())
+            allCols.append(field);
+        return allCols;
+    }
+
+    /**
+     * Gets the InputJobInfo object by reading the Configuration and deserializing
+     * the string. If InputJobInfo is not present in the configuration, throws an
+     * exception since that means HCatInputFormat.setInput has not been called.
+     * @param jobContext the job context
+     * @return the InputJobInfo object
+     * @throws IOException the exception
+     */
+    private static InputJobInfo getJobInfo(JobContext jobContext)
+        throws IOException {
+        String jobString = jobContext.getConfiguration().get(
+            HCatConstants.HCAT_KEY_JOB_INFO);
+        if (jobString == null) {
+            throw new IOException("job information not found in JobContext."
+                + " HCatInputFormat.setInput() not called?");
+        }
+
+        return (InputJobInfo) HCatUtil.deserialize(jobString);
+    }
+
+    private void setInputPath(JobConf jobConf, String location)
+        throws IOException {
+
+        // ideally we should just call FileInputFormat.setInputPaths() here - but
+        // that won't work since FileInputFormat.setInputPaths() needs
+        // a Job object instead of a JobContext which we are handed here
+
+        int length = location.length();
+        int curlyOpen = 0;
+        int pathStart = 0;
+        boolean globPattern = false;
+        List<String> pathStrings = new ArrayList<String>();
+
+        for (int i = 0; i < length; i++) {
+            char ch = location.charAt(i);
+            switch (ch) {
+            case '{': {
+                curlyOpen++;
+                if (!globPattern) {
+                    globPattern = true;
+                }
+                break;
+            }
+            case '}': {
+                curlyOpen--;
+                if (curlyOpen == 0 && globPattern) {
+                    globPattern = false;
+                }
+                break;
+            }
+            case ',': {
+                if (!globPattern) {
+                    pathStrings.add(location.substring(pathStart, i));
+                    pathStart = i + 1;
+                }
+                break;
+            }
+            }
+        }
+        pathStrings.add(location.substring(pathStart, length));
+
+        Path[] paths = StringUtils.stringToPath(pathStrings.toArray(new String[0]));
+
+        FileSystem fs = FileSystem.get(jobConf);
+        Path path = paths[0].makeQualified(fs);
+        StringBuilder str = new StringBuilder(StringUtils.escapeString(
+            path.toString()));
+        for (int i = 1; i < paths.length; i++) {
+            str.append(StringUtils.COMMA_STR);
+            path = paths[i].makeQualified(fs);
+            str.append(StringUtils.escapeString(path.toString()));
+        }
+
+        jobConf.set("mapred.input.dir", str.toString());
+    }
 
 }
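
For reference, the reindented setInputPath above splits a comma-separated location string while keeping commas inside {...} glob groups intact, because FileInputFormat.setInputPaths cannot be used with only a JobContext in hand. A minimal standalone sketch of that parsing (the class and method names below are illustrative, not part of HCatalog):

import java.util.ArrayList;
import java.util.List;

public class LocationSplitter {

    // Splits a comma-separated location string, treating commas inside
    // curly-brace glob groups as part of a single path.
    static List<String> splitLocations(String location) {
        List<String> paths = new ArrayList<String>();
        int curlyOpen = 0;  // depth of nested '{' groups
        int pathStart = 0;  // start index of the current path
        for (int i = 0; i < location.length(); i++) {
            char ch = location.charAt(i);
            if (ch == '{') {
                curlyOpen++;
            } else if (ch == '}') {
                curlyOpen--;
            } else if (ch == ',' && curlyOpen == 0) {
                paths.add(location.substring(pathStart, i));
                pathStart = i + 1;
            }
        }
        paths.add(location.substring(pathStart));
        return paths;
    }

    public static void main(String[] args) {
        // The comma inside the glob stays put; the top-level comma still splits.
        System.out.println(splitLocations("/a/{2012,2013}/x,/b/y"));
        // prints: [/a/{2012,2013}/x, /b/y]
    }
}

The original method additionally tracks a globPattern flag alongside the brace depth; for balanced glob patterns the behavior is the same.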
diff --git a/src/java/org/apache/hcatalog/mapreduce/HCatBaseOutputFormat.java b/src/java/org/apache/hcatalog/mapreduce/HCatBaseOutputFormat.java
index 3d934c5..d741b7f 100644
--- a/src/java/org/apache/hcatalog/mapreduce/HCatBaseOutputFormat.java
+++ b/src/java/org/apache/hcatalog/mapreduce/HCatBaseOutputFormat.java
@@ -39,99 +39,99 @@
 
 //  static final private Log LOG = LogFactory.getLog(HCatBaseOutputFormat.class);
 
-  /**
-   * Gets the table schema for the table specified in the HCatOutputFormat.setOutput call
-   * on the specified job context.
-   * @param context the context
-   * @return the table schema
-   * @throws IOException if HCatOutputFromat.setOutput has not been called for the passed context
-   */
-  public static HCatSchema getTableSchema(JobContext context) throws IOException {
-      OutputJobInfo jobInfo = getJobInfo(context);
-      return jobInfo.getTableInfo().getDataColumns();
-  }
+    /**
+     * Gets the table schema for the table specified in the HCatOutputFormat.setOutput call
+     * on the specified job context.
+     * @param context the context
+     * @return the table schema
+     * @throws IOException if HCatOutputFormat.setOutput has not been called for the passed context
+     */
+    public static HCatSchema getTableSchema(JobContext context) throws IOException {
+        OutputJobInfo jobInfo = getJobInfo(context);
+        return jobInfo.getTableInfo().getDataColumns();
+    }
 
-  /**
-   * Check for validity of the output-specification for the job.
-   * @param context information about the job
-   * @throws IOException when output should not be attempted
-   */
-  @Override
-  public void checkOutputSpecs(JobContext context
-                                        ) throws IOException, InterruptedException {
-    getOutputFormat(context).checkOutputSpecs(context);
-  }
+    /**
+     * Check for validity of the output-specification for the job.
+     * @param context information about the job
+     * @throws IOException when output should not be attempted
+     */
+    @Override
+    public void checkOutputSpecs(JobContext context
+    ) throws IOException, InterruptedException {
+        getOutputFormat(context).checkOutputSpecs(context);
+    }
 
-  /**
-   * Gets the output format instance.
-   * @param context the job context
-   * @return the output format instance
-   * @throws IOException
-   */
-  protected OutputFormat<WritableComparable<?>, HCatRecord> getOutputFormat(JobContext context) throws IOException {
-      OutputJobInfo jobInfo = getJobInfo(context);
-      HCatStorageHandler  storageHandler = HCatUtil.getStorageHandler(context.getConfiguration(), jobInfo.getTableInfo().getStorerInfo());
-      //why do we need this?
-      configureOutputStorageHandler(context);
-      return storageHandler.getOutputFormatContainer(ReflectionUtils.newInstance(storageHandler.getOutputFormatClass(),context.getConfiguration()));
-  }
+    /**
+     * Gets the output format instance.
+     * @param context the job context
+     * @return the output format instance
+     * @throws IOException
+     */
+    protected OutputFormat<WritableComparable<?>, HCatRecord> getOutputFormat(JobContext context) throws IOException {
+        OutputJobInfo jobInfo = getJobInfo(context);
+        HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(context.getConfiguration(), jobInfo.getTableInfo().getStorerInfo());
+        //why do we need this?
+        configureOutputStorageHandler(context);
+        return storageHandler.getOutputFormatContainer(ReflectionUtils.newInstance(storageHandler.getOutputFormatClass(), context.getConfiguration()));
+    }
 
-  /**
-   * Gets the HCatOuputJobInfo object by reading the Configuration and deserializing
-   * the string. If InputJobInfo is not present in the configuration, throws an
-   * exception since that means HCatOutputFormat.setOutput has not been called.
-   * @param jobContext the job context
-   * @return the OutputJobInfo object
-   * @throws IOException the IO exception
-   */
-  public static OutputJobInfo getJobInfo(JobContext jobContext) throws IOException {
-      String jobString = jobContext.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO);
-      if( jobString == null ) {
-          throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED);
-      }
+    /**
+     * Gets the OutputJobInfo object by reading the Configuration and deserializing
+     * the string. If OutputJobInfo is not present in the configuration, throws an
+     * exception since that means HCatOutputFormat.setOutput has not been called.
+     * @param jobContext the job context
+     * @return the OutputJobInfo object
+     * @throws IOException the IO exception
+     */
+    public static OutputJobInfo getJobInfo(JobContext jobContext) throws IOException {
+        String jobString = jobContext.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO);
+        if (jobString == null) {
+            throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED);
+        }
 
-      return (OutputJobInfo) HCatUtil.deserialize(jobString);
-  }
+        return (OutputJobInfo) HCatUtil.deserialize(jobString);
+    }
 
-  /**
-   * Configure the output storage handler
-   * @param jobContext the job context
-   * @throws IOException
-   */
-  @SuppressWarnings("unchecked")
-  static void configureOutputStorageHandler(
-          JobContext jobContext) throws IOException {
-    configureOutputStorageHandler(jobContext,(List<String>)null);
-  }
+    /**
+     * Configure the output storage handler
+     * @param jobContext the job context
+     * @throws IOException
+     */
+    @SuppressWarnings("unchecked")
+    static void configureOutputStorageHandler(
+        JobContext jobContext) throws IOException {
+        configureOutputStorageHandler(jobContext, (List<String>) null);
+    }
 
-  /**
-   * Configure the output storage handler with allowing specification of missing dynamic partvals
-   * @param jobContext the job context
-   * @param dynamicPartVals
-   * @throws IOException
-   */
-  @SuppressWarnings("unchecked")
-  static void configureOutputStorageHandler(
-          JobContext jobContext, List<String> dynamicPartVals) throws IOException {
-      try {
-          OutputJobInfo jobInfo = (OutputJobInfo)HCatUtil.deserialize(jobContext.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
-          HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(jobContext.getConfiguration(),jobInfo.getTableInfo().getStorerInfo());
+    /**
+     * Configure the output storage handler, allowing specification of missing dynamic partvals
+     * @param jobContext the job context
+     * @param dynamicPartVals
+     * @throws IOException
+     */
+    @SuppressWarnings("unchecked")
+    static void configureOutputStorageHandler(
+        JobContext jobContext, List<String> dynamicPartVals) throws IOException {
+        try {
+            OutputJobInfo jobInfo = (OutputJobInfo) HCatUtil.deserialize(jobContext.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
+            HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(jobContext.getConfiguration(), jobInfo.getTableInfo().getStorerInfo());
 
-          Map<String, String> partitionValues = jobInfo.getPartitionValues();
-          String location = jobInfo.getLocation();
+            Map<String, String> partitionValues = jobInfo.getPartitionValues();
+            String location = jobInfo.getLocation();
 
-          if (dynamicPartVals != null){
-            // dynamic part vals specified
-            List<String> dynamicPartKeys = jobInfo.getDynamicPartitioningKeys();
-            if (dynamicPartVals.size() != dynamicPartKeys.size()){
-              throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES, 
-                  "Unable to configure dynamic partitioning for storage handler, mismatch between"
-                  + " number of partition values obtained["+dynamicPartVals.size()
-                  + "] and number of partition values required["+dynamicPartKeys.size()+"]");
-            }
-            for (int i = 0; i < dynamicPartKeys.size(); i++){
-              partitionValues.put(dynamicPartKeys.get(i), dynamicPartVals.get(i));
-            }
+            if (dynamicPartVals != null) {
+                // dynamic part vals specified
+                List<String> dynamicPartKeys = jobInfo.getDynamicPartitioningKeys();
+                if (dynamicPartVals.size() != dynamicPartKeys.size()) {
+                    throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
+                        "Unable to configure dynamic partitioning for storage handler, mismatch between"
+                            + " number of partition values obtained[" + dynamicPartVals.size()
+                            + "] and number of partition values required[" + dynamicPartKeys.size() + "]");
+                }
+                for (int i = 0; i < dynamicPartKeys.size(); i++) {
+                    partitionValues.put(dynamicPartKeys.get(i), dynamicPartVals.get(i));
+                }
 
 //            // re-home location, now that we know the rest of the partvals
 //            Table table = jobInfo.getTableInfo().getTable();
@@ -140,85 +140,85 @@
 //            for(FieldSchema schema : table.getPartitionKeys()) {
 //              partitionCols.add(schema.getName());
 //            }
-            jobInfo.setPartitionValues(partitionValues);
-          }
+                jobInfo.setPartitionValues(partitionValues);
+            }
 
-          HCatUtil.configureOutputStorageHandler(storageHandler,jobContext,jobInfo);
-      } catch(Exception e) {
-        if (e instanceof HCatException){
-          throw (HCatException)e;
-        }else{
-          throw new HCatException(ErrorType.ERROR_INIT_STORAGE_HANDLER, e);
+            HCatUtil.configureOutputStorageHandler(storageHandler, jobContext, jobInfo);
+        } catch (Exception e) {
+            if (e instanceof HCatException) {
+                throw (HCatException) e;
+            } else {
+                throw new HCatException(ErrorType.ERROR_INIT_STORAGE_HANDLER, e);
+            }
         }
-      }
-  }
-
-  /**
-   * Configure the output storage handler, with allowing specification 
-   * of partvals from which it picks the dynamic partvals
-   * @param context the job context
-   * @param jobInfo the output job info
-   * @param fullPartSpec
-   * @throws IOException
-   */
-
-  protected static void configureOutputStorageHandler(
-      JobContext context, OutputJobInfo jobInfo,
-      Map<String, String> fullPartSpec) throws IOException {
-    List<String> dynamicPartKeys = jobInfo.getDynamicPartitioningKeys();
-    if ((dynamicPartKeys == null)||(dynamicPartKeys.isEmpty())){
-      configureOutputStorageHandler(context, (List<String>) null);
-    }else{
-      List<String> dynKeyVals = new ArrayList<String>();
-      for (String dynamicPartKey : dynamicPartKeys){
-        dynKeyVals.add(fullPartSpec.get(dynamicPartKey));
-      }
-      configureOutputStorageHandler(context, dynKeyVals);
-    }
-  }
-
-
-  protected static void setPartDetails(OutputJobInfo jobInfo, final HCatSchema schema,
-      Map<String, String> partMap) throws HCatException, IOException {
-    List<Integer> posOfPartCols = new ArrayList<Integer>();
-    List<Integer> posOfDynPartCols = new ArrayList<Integer>();
-
-    // If partition columns occur in data, we want to remove them.
-    // So, find out positions of partition columns in schema provided by user.
-    // We also need to update the output Schema with these deletions.
-    
-    // Note that, output storage handlers never sees partition columns in data
-    // or schema.
-
-    HCatSchema schemaWithoutParts = new HCatSchema(schema.getFields());
-    for(String partKey : partMap.keySet()){
-      Integer idx;
-      if((idx = schema.getPosition(partKey)) != null){
-        posOfPartCols.add(idx);
-        schemaWithoutParts.remove(schema.get(partKey));
-      }
     }
 
-    // Also, if dynamic partitioning is being used, we want to
-    // set appropriate list of columns for the columns to be dynamically specified.
-    // These would be partition keys too, so would also need to be removed from 
-    // output schema and partcols
+    /**
+     * Configure the output storage handler, allowing specification
+     * of the partvals from which it picks the dynamic partvals
+     * @param context the job context
+     * @param jobInfo the output job info
+     * @param fullPartSpec
+     * @throws IOException
+     */
 
-    if (jobInfo.isDynamicPartitioningUsed()){
-      for (String partKey : jobInfo.getDynamicPartitioningKeys()){
-        Integer idx;
-        if((idx = schema.getPosition(partKey)) != null){
-          posOfPartCols.add(idx);
-          posOfDynPartCols.add(idx);
-          schemaWithoutParts.remove(schema.get(partKey));
+    protected static void configureOutputStorageHandler(
+        JobContext context, OutputJobInfo jobInfo,
+        Map<String, String> fullPartSpec) throws IOException {
+        List<String> dynamicPartKeys = jobInfo.getDynamicPartitioningKeys();
+        if ((dynamicPartKeys == null) || (dynamicPartKeys.isEmpty())) {
+            configureOutputStorageHandler(context, (List<String>) null);
+        } else {
+            List<String> dynKeyVals = new ArrayList<String>();
+            for (String dynamicPartKey : dynamicPartKeys) {
+                dynKeyVals.add(fullPartSpec.get(dynamicPartKey));
+            }
+            configureOutputStorageHandler(context, dynKeyVals);
         }
-      }
     }
-    
-    HCatUtil.validatePartitionSchema(
-        new Table(jobInfo.getTableInfo().getTable()), schemaWithoutParts);
-    jobInfo.setPosOfPartCols(posOfPartCols);
-    jobInfo.setPosOfDynPartCols(posOfDynPartCols);
-    jobInfo.setOutputSchema(schemaWithoutParts);
-  }
+
+
+    protected static void setPartDetails(OutputJobInfo jobInfo, final HCatSchema schema,
+                                         Map<String, String> partMap) throws HCatException, IOException {
+        List<Integer> posOfPartCols = new ArrayList<Integer>();
+        List<Integer> posOfDynPartCols = new ArrayList<Integer>();
+
+        // If partition columns occur in data, we want to remove them.
+        // So, find out positions of partition columns in schema provided by user.
+        // We also need to update the output Schema with these deletions.
+
+        // Note that output storage handlers never see partition columns in the data
+        // or schema.
+
+        HCatSchema schemaWithoutParts = new HCatSchema(schema.getFields());
+        for (String partKey : partMap.keySet()) {
+            Integer idx;
+            if ((idx = schema.getPosition(partKey)) != null) {
+                posOfPartCols.add(idx);
+                schemaWithoutParts.remove(schema.get(partKey));
+            }
+        }
+
+        // Also, if dynamic partitioning is being used, we want to
+        // set appropriate list of columns for the columns to be dynamically specified.
+        // These would be partition keys too, so would also need to be removed from
+        // output schema and partcols
+
+        if (jobInfo.isDynamicPartitioningUsed()) {
+            for (String partKey : jobInfo.getDynamicPartitioningKeys()) {
+                Integer idx;
+                if ((idx = schema.getPosition(partKey)) != null) {
+                    posOfPartCols.add(idx);
+                    posOfDynPartCols.add(idx);
+                    schemaWithoutParts.remove(schema.get(partKey));
+                }
+            }
+        }
+
+        HCatUtil.validatePartitionSchema(
+            new Table(jobInfo.getTableInfo().getTable()), schemaWithoutParts);
+        jobInfo.setPosOfPartCols(posOfPartCols);
+        jobInfo.setPosOfDynPartCols(posOfDynPartCols);
+        jobInfo.setOutputSchema(schemaWithoutParts);
+    }
 }
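
setPartDetails above records the positions of partition columns that appear in the user-supplied schema and removes them before the schema reaches the storage handler. A simplified sketch of that bookkeeping, using plain lists instead of HCatSchema (all names below are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PartColumnPruner {

    // Returns the schema without partition columns; positions of removed
    // columns (relative to the original schema) are collected in posOfPartCols.
    static List<String> prune(List<String> schema, Map<String, String> partValues,
                              List<Integer> posOfPartCols) {
        List<String> withoutParts = new ArrayList<String>(schema);
        for (String partKey : partValues.keySet()) {
            int idx = schema.indexOf(partKey);
            if (idx >= 0) {
                posOfPartCols.add(idx);       // remember where the part col sat
                withoutParts.remove(partKey); // the storage handler never sees it
            }
        }
        return withoutParts;
    }

    public static void main(String[] args) {
        List<String> schema = Arrays.asList("id", "name", "dt");
        Map<String, String> partValues = new LinkedHashMap<String, String>();
        partValues.put("dt", "2012-08-01");
        List<Integer> positions = new ArrayList<Integer>();
        System.out.println(prune(schema, partValues, positions)); // [id, name]
        System.out.println(positions);                            // [2]
    }
}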
diff --git a/src/java/org/apache/hcatalog/mapreduce/HCatInputFormat.java b/src/java/org/apache/hcatalog/mapreduce/HCatInputFormat.java
index f1d8276..b668d7a 100644
--- a/src/java/org/apache/hcatalog/mapreduce/HCatInputFormat.java
+++ b/src/java/org/apache/hcatalog/mapreduce/HCatInputFormat.java
@@ -25,23 +25,23 @@
 /** The InputFormat to use to read data from HCatalog. */
 public class HCatInputFormat extends HCatBaseInputFormat {
 
-  /**
-   * Set the input information to use for the job. This queries the metadata server 
-   * with the specified partition predicates, gets the matching partitions, and 
-   * puts the information in the conf object. The inputInfo object is updated 
-   * with information needed in the client context.
-   * @param job the job object
-   * @param inputJobInfo the input information about the table to read
-   * @throws IOException the exception in communicating with the metadata server
-   */
-  public static void setInput(Job job,
-      InputJobInfo inputJobInfo) throws IOException {
-    try {
-      InitializeInput.setInput(job, inputJobInfo);
-    } catch (Exception e) {
-      throw new IOException(e);
+    /**
+     * Set the input information to use for the job. This queries the metadata server
+     * with the specified partition predicates, gets the matching partitions, and
+     * puts the information in the conf object. The inputInfo object is updated
+     * with information needed in the client context.
+     * @param job the job object
+     * @param inputJobInfo the input information about the table to read
+     * @throws IOException the exception in communicating with the metadata server
+     */
+    public static void setInput(Job job,
+                                InputJobInfo inputJobInfo) throws IOException {
+        try {
+            InitializeInput.setInput(job, inputJobInfo);
+        } catch (Exception e) {
+            throw new IOException(e);
+        }
     }
-  }
 
 
 }
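
As a usage sketch (not taken from this patch), a MapReduce driver would typically call HCatInputFormat.setInput before submitting the job. The database and table names, and the three-argument InputJobInfo.create factory used below, are assumptions to be verified against this release:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hcatalog.data.schema.HCatSchema;
import org.apache.hcatalog.mapreduce.HCatInputFormat;
import org.apache.hcatalog.mapreduce.InputJobInfo;

public class ReadDriverSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "hcat-read-sketch");

        // "default" / "my_table" are placeholders; the third argument is the
        // partition filter (null reads all partitions). Check the create()
        // signature against the InputJobInfo class in this release.
        HCatInputFormat.setInput(job, InputJobInfo.create("default", "my_table", null));
        job.setInputFormatClass(HCatInputFormat.class);

        // After setInput, the table schema is available to the client.
        HCatSchema schema = HCatInputFormat.getTableSchema(job);
        System.out.println(schema.getFieldNames());

        // Mapper/reducer and output configuration omitted in this sketch.
    }
}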
diff --git a/src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java b/src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java
index e095e4d..30c9e6b 100644
--- a/src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java
+++ b/src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java
@@ -66,135 +66,135 @@
      */
     @SuppressWarnings("unchecked")
     public static void setOutput(Job job, OutputJobInfo outputJobInfo) throws IOException {
-      HiveMetaStoreClient client = null;
+        HiveMetaStoreClient client = null;
 
-      try {
+        try {
 
-        Configuration conf = job.getConfiguration();
-        HiveConf hiveConf = HCatUtil.getHiveConf(conf);
-        client = HCatUtil.getHiveClient(hiveConf);
-        Table table = HCatUtil.getTable(client, outputJobInfo.getDatabaseName(),
-            outputJobInfo.getTableName());
+            Configuration conf = job.getConfiguration();
+            HiveConf hiveConf = HCatUtil.getHiveConf(conf);
+            client = HCatUtil.getHiveClient(hiveConf);
+            Table table = HCatUtil.getTable(client, outputJobInfo.getDatabaseName(),
+                outputJobInfo.getTableName());
 
-        List<String> indexList = client.listIndexNames(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), Short.MAX_VALUE);
+            List<String> indexList = client.listIndexNames(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), Short.MAX_VALUE);
 
-        for (String indexName : indexList) {
-            Index index = client.getIndex(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), indexName);
-            if (!index.isDeferredRebuild()) {
-                throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a table with an automatic index from Pig/Mapreduce is not supported");
+            for (String indexName : indexList) {
+                Index index = client.getIndex(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), indexName);
+                if (!index.isDeferredRebuild()) {
+                    throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a table with an automatic index from Pig/Mapreduce is not supported");
+                }
             }
-        }
-        StorageDescriptor sd = table.getTTable().getSd();
+            StorageDescriptor sd = table.getTTable().getSd();
 
-        if (sd.isCompressed()) {
-            throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a compressed partition from Pig/Mapreduce is not supported");
-        }
-
-        if (sd.getBucketCols()!=null && !sd.getBucketCols().isEmpty()) {
-            throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with bucket definition from Pig/Mapreduce is not supported");
-        }
-
-        if (sd.getSortCols()!=null && !sd.getSortCols().isEmpty()) {
-            throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with sorted column definition from Pig/Mapreduce is not supported");
-        }
-
-        if (table.getTTable().getPartitionKeysSize() == 0 ){
-          if ((outputJobInfo.getPartitionValues() != null) && (!outputJobInfo.getPartitionValues().isEmpty())){
-            // attempt made to save partition values in non-partitioned table - throw error.
-            throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
-                "Partition values specified for non-partitioned table");
-          }
-          // non-partitioned table
-          outputJobInfo.setPartitionValues(new HashMap<String, String>());
-
-        } else {
-          // partitioned table, we expect partition values
-          // convert user specified map to have lower case key names
-          Map<String, String> valueMap = new HashMap<String, String>();
-          if (outputJobInfo.getPartitionValues() != null){
-            for(Map.Entry<String, String> entry : outputJobInfo.getPartitionValues().entrySet()) {
-              valueMap.put(entry.getKey().toLowerCase(), entry.getValue());
-            }
-          }
-
-          if ((outputJobInfo.getPartitionValues() == null)
-              || (outputJobInfo.getPartitionValues().size() < table.getTTable().getPartitionKeysSize())){
-            // dynamic partition usecase - partition values were null, or not all were specified
-            // need to figure out which keys are not specified.
-            List<String> dynamicPartitioningKeys = new ArrayList<String>();
-            boolean firstItem = true;
-            for (FieldSchema fs : table.getPartitionKeys()){
-              if (!valueMap.containsKey(fs.getName().toLowerCase())){
-                dynamicPartitioningKeys.add(fs.getName().toLowerCase());
-              }
+            if (sd.isCompressed()) {
+                throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a compressed partition from Pig/Mapreduce is not supported");
             }
 
-            if (valueMap.size() + dynamicPartitioningKeys.size() != table.getTTable().getPartitionKeysSize()){
-              // If this isn't equal, then bogus key values have been inserted, error out.
-              throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,"Invalid partition keys specified");
+            if (sd.getBucketCols() != null && !sd.getBucketCols().isEmpty()) {
+                throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with bucket definition from Pig/Mapreduce is not supported");
             }
 
-            outputJobInfo.setDynamicPartitioningKeys(dynamicPartitioningKeys);
-            String dynHash;
-            if ((dynHash = conf.get(HCatConstants.HCAT_DYNAMIC_PTN_JOBID)) == null){
-              dynHash = String.valueOf(Math.random());
+            if (sd.getSortCols() != null && !sd.getSortCols().isEmpty()) {
+                throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with sorted column definition from Pig/Mapreduce is not supported");
+            }
+
+            if (table.getTTable().getPartitionKeysSize() == 0) {
+                if ((outputJobInfo.getPartitionValues() != null) && (!outputJobInfo.getPartitionValues().isEmpty())) {
+                    // attempt made to save partition values in non-partitioned table - throw error.
+                    throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
+                        "Partition values specified for non-partitioned table");
+                }
+                // non-partitioned table
+                outputJobInfo.setPartitionValues(new HashMap<String, String>());
+
+            } else {
+                // partitioned table, we expect partition values
+                // convert user specified map to have lower case key names
+                Map<String, String> valueMap = new HashMap<String, String>();
+                if (outputJobInfo.getPartitionValues() != null) {
+                    for (Map.Entry<String, String> entry : outputJobInfo.getPartitionValues().entrySet()) {
+                        valueMap.put(entry.getKey().toLowerCase(), entry.getValue());
+                    }
+                }
+
+                if ((outputJobInfo.getPartitionValues() == null)
+                    || (outputJobInfo.getPartitionValues().size() < table.getTTable().getPartitionKeysSize())) {
+                    // dynamic partition usecase - partition values were null, or not all were specified
+                    // need to figure out which keys are not specified.
+                    List<String> dynamicPartitioningKeys = new ArrayList<String>();
+                    boolean firstItem = true;
+                    for (FieldSchema fs : table.getPartitionKeys()) {
+                        if (!valueMap.containsKey(fs.getName().toLowerCase())) {
+                            dynamicPartitioningKeys.add(fs.getName().toLowerCase());
+                        }
+                    }
+
+                    if (valueMap.size() + dynamicPartitioningKeys.size() != table.getTTable().getPartitionKeysSize()) {
+                        // If this isn't equal, then bogus key values have been inserted, error out.
+                        throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES, "Invalid partition keys specified");
+                    }
+
+                    outputJobInfo.setDynamicPartitioningKeys(dynamicPartitioningKeys);
+                    String dynHash;
+                    if ((dynHash = conf.get(HCatConstants.HCAT_DYNAMIC_PTN_JOBID)) == null) {
+                        dynHash = String.valueOf(Math.random());
 //              LOG.info("New dynHash : ["+dynHash+"]");
 //            }else{
 //              LOG.info("Old dynHash : ["+dynHash+"]");
+                    }
+                    conf.set(HCatConstants.HCAT_DYNAMIC_PTN_JOBID, dynHash);
+
+                }
+
+                outputJobInfo.setPartitionValues(valueMap);
             }
-            conf.set(HCatConstants.HCAT_DYNAMIC_PTN_JOBID, dynHash);
 
-          }
+            HCatSchema tableSchema = HCatUtil.extractSchema(table);
+            StorerInfo storerInfo =
+                InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());
 
-          outputJobInfo.setPartitionValues(valueMap);
+            List<String> partitionCols = new ArrayList<String>();
+            for (FieldSchema schema : table.getPartitionKeys()) {
+                partitionCols.add(schema.getName());
+            }
+
+            HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(job.getConfiguration(), storerInfo);
+
+            //Serialize the output info into the configuration
+            outputJobInfo.setTableInfo(HCatTableInfo.valueOf(table.getTTable()));
+            outputJobInfo.setOutputSchema(tableSchema);
+            harRequested = getHarRequested(hiveConf);
+            outputJobInfo.setHarRequested(harRequested);
+            maxDynamicPartitions = getMaxDynamicPartitions(hiveConf);
+            outputJobInfo.setMaximumDynamicPartitions(maxDynamicPartitions);
+
+            HCatUtil.configureOutputStorageHandler(storageHandler, job, outputJobInfo);
+
+            Path tblPath = new Path(table.getTTable().getSd().getLocation());
+
+            /*  Set the umask in conf such that files/dirs get created with table-dir
+            * permissions. Following three assumptions are made:
+            * 1. Actual files/dirs creation is done by RecordWriter of underlying
+            * output format. It is assumed that they use default permissions while creation.
+            * 2. Default Permissions = FsPermission.getDefault() = 777.
+            * 3. UMask is honored by underlying filesystem.
+            */
+
+            FsPermission.setUMask(conf, FsPermission.getDefault().applyUMask(
+                tblPath.getFileSystem(conf).getFileStatus(tblPath).getPermission()));
+
+            if (Security.getInstance().isSecurityEnabled()) {
+                Security.getInstance().handleSecurity(job, outputJobInfo, client, conf, harRequested);
+            }
+        } catch (Exception e) {
+            if (e instanceof HCatException) {
+                throw (HCatException) e;
+            } else {
+                throw new HCatException(ErrorType.ERROR_SET_OUTPUT, e);
+            }
+        } finally {
+            HCatUtil.closeHiveClientQuietly(client);
         }
-
-        HCatSchema tableSchema = HCatUtil.extractSchema(table);
-        StorerInfo storerInfo =
-            InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());
-
-        List<String> partitionCols = new ArrayList<String>();
-        for(FieldSchema schema : table.getPartitionKeys()) {
-          partitionCols.add(schema.getName());
-        }
-
-       HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(job.getConfiguration(), storerInfo);
-
-        //Serialize the output info into the configuration
-        outputJobInfo.setTableInfo(HCatTableInfo.valueOf(table.getTTable()));
-        outputJobInfo.setOutputSchema(tableSchema);
-        harRequested = getHarRequested(hiveConf);
-        outputJobInfo.setHarRequested(harRequested);
-        maxDynamicPartitions = getMaxDynamicPartitions(hiveConf);
-        outputJobInfo.setMaximumDynamicPartitions(maxDynamicPartitions);
-
-        HCatUtil.configureOutputStorageHandler(storageHandler,job,outputJobInfo);
-
-        Path tblPath = new Path(table.getTTable().getSd().getLocation());
-
-        /*  Set the umask in conf such that files/dirs get created with table-dir
-         * permissions. Following three assumptions are made:
-         * 1. Actual files/dirs creation is done by RecordWriter of underlying
-         * output format. It is assumed that they use default permissions while creation.
-         * 2. Default Permissions = FsPermission.getDefault() = 777.
-         * 3. UMask is honored by underlying filesystem.
-         */
-
-        FsPermission.setUMask(conf, FsPermission.getDefault().applyUMask(
-            tblPath.getFileSystem(conf).getFileStatus(tblPath).getPermission()));
-
-        if(Security.getInstance().isSecurityEnabled()) {
-            Security.getInstance().handleSecurity(job, outputJobInfo, client, conf, harRequested);
-        }
-      } catch(Exception e) {
-        if( e instanceof HCatException ) {
-          throw (HCatException) e;
-        } else {
-          throw new HCatException(ErrorType.ERROR_SET_OUTPUT, e);
-        }
-      } finally {
-        HCatUtil.closeHiveClientQuietly(client);
-      }
     }
 
     /**
@@ -207,7 +207,7 @@
     public static void setSchema(final Job job, final HCatSchema schema) throws IOException {
 
         OutputJobInfo jobInfo = getJobInfo(job);
-        Map<String,String> partMap = jobInfo.getPartitionValues();
+        Map<String, String> partMap = jobInfo.getPartitionValues();
         setPartDetails(jobInfo, schema, partMap);
         job.getConfiguration().set(HCatConstants.HCAT_KEY_OUTPUT_INFO, HCatUtil.serialize(jobInfo));
     }
@@ -222,9 +222,9 @@
      */
     @Override
     public RecordWriter<WritableComparable<?>, HCatRecord>
-        getRecordWriter(TaskAttemptContext context)
+    getRecordWriter(TaskAttemptContext context)
         throws IOException, InterruptedException {
-      return getOutputFormat(context).getRecordWriter(context);
+        return getOutputFormat(context).getRecordWriter(context);
     }
 
 
@@ -238,25 +238,25 @@
      */
     @Override
     public OutputCommitter getOutputCommitter(TaskAttemptContext context
-                                       ) throws IOException, InterruptedException {
+    ) throws IOException, InterruptedException {
         return getOutputFormat(context).getOutputCommitter(context);
     }
 
     private static int getMaxDynamicPartitions(HiveConf hConf) {
-      // by default the bounds checking for maximum number of
-      // dynamic partitions is disabled (-1)
-      int maxDynamicPartitions = -1;
+        // by default the bounds checking for maximum number of
+        // dynamic partitions is disabled (-1)
+        int maxDynamicPartitions = -1;
 
-      if (HCatConstants.HCAT_IS_DYNAMIC_MAX_PTN_CHECK_ENABLED){
-        maxDynamicPartitions = hConf.getIntVar(
-                                HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS);
-      }
+        if (HCatConstants.HCAT_IS_DYNAMIC_MAX_PTN_CHECK_ENABLED) {
+            maxDynamicPartitions = hConf.getIntVar(
+                HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS);
+        }
 
-      return maxDynamicPartitions;
+        return maxDynamicPartitions;
     }
 
     private static boolean getHarRequested(HiveConf hConf) {
-      return hConf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED);
+        return hConf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED);
     }
 
 }
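
Similarly, a hedged driver-side sketch of HCatOutputFormat.setOutput and setSchema. The OutputJobInfo.create(database, table, partitionValues) factory and all table names below are assumptions, and mapper/reducer wiring is omitted:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hcatalog.data.schema.HCatSchema;
import org.apache.hcatalog.mapreduce.HCatOutputFormat;
import org.apache.hcatalog.mapreduce.OutputJobInfo;

public class WriteDriverSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "hcat-write-sketch");

        // Static partition spec; leaving keys out of this map triggers the
        // dynamic partitioning path handled in setOutput above.
        Map<String, String> partitions = new HashMap<String, String>();
        partitions.put("dt", "2012-08-01"); // placeholder values

        HCatOutputFormat.setOutput(job,
            OutputJobInfo.create("default", "my_table", partitions));

        // The output schema mirrors the table's data columns; setSchema
        // strips any partition columns via setPartDetails.
        HCatSchema schema = HCatOutputFormat.getTableSchema(job);
        HCatOutputFormat.setSchema(job, schema);
        job.setOutputFormatClass(HCatOutputFormat.class);
    }
}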
diff --git a/src/java/org/apache/hcatalog/mapreduce/HCatRecordReader.java b/src/java/org/apache/hcatalog/mapreduce/HCatRecordReader.java
index c038619..e91d12c 100644
--- a/src/java/org/apache/hcatalog/mapreduce/HCatRecordReader.java
+++ b/src/java/org/apache/hcatalog/mapreduce/HCatRecordReader.java
@@ -61,7 +61,7 @@
 
     private Deserializer deserializer;
 
-    private Map<String,String> valuesNotInDataCols;
+    private Map<String, String> valuesNotInDataCols;
 
     private HCatSchema outputSchema = null;
     private HCatSchema dataSchema = null;
@@ -70,9 +70,9 @@
      * Instantiates a new hcat record reader.
      */
     public HCatRecordReader(HCatStorageHandler storageHandler,
-                     Map<String,String> valuesNotInDataCols) {
-      this.storageHandler = storageHandler;
-      this.valuesNotInDataCols = valuesNotInDataCols;
+                            Map<String, String> valuesNotInDataCols) {
+        this.storageHandler = storageHandler;
+        this.valuesNotInDataCols = valuesNotInDataCols;
     }
 
     /* (non-Javadoc)
@@ -82,62 +82,62 @@
      */
     @Override
     public void initialize(org.apache.hadoop.mapreduce.InputSplit split,
-        TaskAttemptContext taskContext) throws IOException, InterruptedException {
+                           TaskAttemptContext taskContext) throws IOException, InterruptedException {
 
-      HCatSplit hcatSplit = InternalUtil.castToHCatSplit(split);
+        HCatSplit hcatSplit = InternalUtil.castToHCatSplit(split);
 
-      baseRecordReader = createBaseRecordReader(hcatSplit, storageHandler, taskContext);
-      createDeserializer(hcatSplit, storageHandler, taskContext);
+        baseRecordReader = createBaseRecordReader(hcatSplit, storageHandler, taskContext);
+        createDeserializer(hcatSplit, storageHandler, taskContext);
 
-      // Pull the output schema out of the TaskAttemptContext
-      outputSchema = (HCatSchema) HCatUtil.deserialize(
-          taskContext.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_SCHEMA));
+        // Pull the output schema out of the TaskAttemptContext
+        outputSchema = (HCatSchema) HCatUtil.deserialize(
+            taskContext.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_SCHEMA));
 
-      if (outputSchema == null) {
-        outputSchema = hcatSplit.getTableSchema();
-      }
+        if (outputSchema == null) {
+            outputSchema = hcatSplit.getTableSchema();
+        }
 
-      // Pull the table schema out of the Split info
-      // TODO This should be passed in the TaskAttemptContext instead
-      dataSchema = hcatSplit.getDataSchema();
+        // Pull the table schema out of the Split info
+        // TODO This should be passed in the TaskAttemptContext instead
+        dataSchema = hcatSplit.getDataSchema();
 
-      errorTracker = new InputErrorTracker(taskContext.getConfiguration());
+        errorTracker = new InputErrorTracker(taskContext.getConfiguration());
     }
 
     private org.apache.hadoop.mapred.RecordReader createBaseRecordReader(HCatSplit hcatSplit,
-        HCatStorageHandler storageHandler, TaskAttemptContext taskContext) throws IOException {
+                                                                         HCatStorageHandler storageHandler, TaskAttemptContext taskContext) throws IOException {
 
-      JobConf jobConf = HCatUtil.getJobConfFromContext(taskContext);
-      HCatUtil.copyJobPropertiesToJobConf(hcatSplit.getPartitionInfo().getJobProperties(), jobConf);
-      org.apache.hadoop.mapred.InputFormat inputFormat =
-          HCatInputFormat.getMapRedInputFormat(jobConf, storageHandler.getInputFormatClass());
-      return inputFormat.getRecordReader(hcatSplit.getBaseSplit(), jobConf,
-          InternalUtil.createReporter(taskContext));
+        JobConf jobConf = HCatUtil.getJobConfFromContext(taskContext);
+        HCatUtil.copyJobPropertiesToJobConf(hcatSplit.getPartitionInfo().getJobProperties(), jobConf);
+        org.apache.hadoop.mapred.InputFormat inputFormat =
+            HCatInputFormat.getMapRedInputFormat(jobConf, storageHandler.getInputFormatClass());
+        return inputFormat.getRecordReader(hcatSplit.getBaseSplit(), jobConf,
+            InternalUtil.createReporter(taskContext));
     }
 
     private void createDeserializer(HCatSplit hcatSplit, HCatStorageHandler storageHandler,
-        TaskAttemptContext taskContext) throws IOException {
+                                    TaskAttemptContext taskContext) throws IOException {
 
-      deserializer = ReflectionUtils.newInstance(storageHandler.getSerDeClass(),
-          taskContext.getConfiguration());
+        deserializer = ReflectionUtils.newInstance(storageHandler.getSerDeClass(),
+            taskContext.getConfiguration());
 
-      try {
-        InternalUtil.initializeDeserializer(deserializer, storageHandler.getConf(),
-            hcatSplit.getPartitionInfo().getTableInfo(),
-            hcatSplit.getPartitionInfo().getPartitionSchema());
-      } catch (SerDeException e) {
-        throw new IOException("Failed initializing deserializer "
-            + storageHandler.getSerDeClass().getName(), e);
-      }
+        try {
+            InternalUtil.initializeDeserializer(deserializer, storageHandler.getConf(),
+                hcatSplit.getPartitionInfo().getTableInfo(),
+                hcatSplit.getPartitionInfo().getPartitionSchema());
+        } catch (SerDeException e) {
+            throw new IOException("Failed initializing deserializer "
+                + storageHandler.getSerDeClass().getName(), e);
+        }
     }
 
-  /* (non-Javadoc)
-     * @see org.apache.hadoop.mapreduce.RecordReader#getCurrentKey()
-     */
+    /* (non-Javadoc)
+    * @see org.apache.hadoop.mapreduce.RecordReader#getCurrentKey()
+    */
     @Override
     public WritableComparable getCurrentKey()
-    throws IOException, InterruptedException {
-      return currentKey;
+        throws IOException, InterruptedException {
+        return currentKey;
     }
 
     /* (non-Javadoc)
@@ -145,140 +145,140 @@
      */
     @Override
     public HCatRecord getCurrentValue() throws IOException, InterruptedException {
-      return currentHCatRecord;
+        return currentHCatRecord;
     }
 
     /* (non-Javadoc)
      * @see org.apache.hadoop.mapreduce.RecordReader#getProgress()
      */
     @Override
-    public float getProgress()  {
+    public float getProgress() {
         try {
-          return baseRecordReader.getProgress();
+            return baseRecordReader.getProgress();
         } catch (IOException e) {
-            LOG.warn("Exception in HCatRecord reader",e);
+            LOG.warn("Exception in HCatRecord reader", e);
         }
         return 0.0f; // errored
     }
 
-  /**
-   * Check if the wrapped RecordReader has another record, and if so convert it into an
-   * HCatRecord. We both check for records and convert here so a configurable percent of
-   * bad records can be tolerated.
-   *
-   * @return if there is a next record
-   * @throws IOException on error
-   * @throws InterruptedException on error
-   */
-  @Override
-  public boolean nextKeyValue() throws IOException, InterruptedException {
-    if (currentKey == null) {
-      currentKey = baseRecordReader.createKey();
-      currentValue = baseRecordReader.createValue();
-    }
-
-    while (baseRecordReader.next(currentKey, currentValue)) {
-      HCatRecord r = null;
-      Throwable t = null;
-
-      errorTracker.incRecords();
-
-      try {
-        Object o = deserializer.deserialize(currentValue);
-        r = new LazyHCatRecord(o, deserializer.getObjectInspector());
-      } catch (Throwable throwable) {
-        t = throwable;
-      }
-
-      if (r == null) {
-        errorTracker.incErrors(t);
-        continue;
-      }
-
-      DefaultHCatRecord dr = new DefaultHCatRecord(outputSchema.size());
-      int i = 0;
-      for (String fieldName : outputSchema.getFieldNames()) {
-        if (dataSchema.getPosition(fieldName) != null) {
-          dr.set(i, r.get(fieldName, dataSchema));
-        } else {
-          dr.set(i, valuesNotInDataCols.get(fieldName));
-        }
-        i++;
-      }
-
-      currentHCatRecord = dr;
-      return true;
-    }
-
-    return false;
-  }
-
-  /* (non-Javadoc)
-     * @see org.apache.hadoop.mapreduce.RecordReader#close()
+    /**
+     * Check if the wrapped RecordReader has another record, and if so convert it into an
+     * HCatRecord. We both check for records and convert here so a configurable percent of
+     * bad records can be tolerated.
+     *
+     * @return if there is a next record
+     * @throws IOException on error
+     * @throws InterruptedException on error
      */
     @Override
+    public boolean nextKeyValue() throws IOException, InterruptedException {
+        if (currentKey == null) {
+            currentKey = baseRecordReader.createKey();
+            currentValue = baseRecordReader.createValue();
+        }
+
+        while (baseRecordReader.next(currentKey, currentValue)) {
+            HCatRecord r = null;
+            Throwable t = null;
+
+            errorTracker.incRecords();
+
+            try {
+                Object o = deserializer.deserialize(currentValue);
+                r = new LazyHCatRecord(o, deserializer.getObjectInspector());
+            } catch (Throwable throwable) {
+                t = throwable;
+            }
+
+            if (r == null) {
+                errorTracker.incErrors(t);
+                continue;
+            }
+
+            DefaultHCatRecord dr = new DefaultHCatRecord(outputSchema.size());
+            int i = 0;
+            for (String fieldName : outputSchema.getFieldNames()) {
+                if (dataSchema.getPosition(fieldName) != null) {
+                    dr.set(i, r.get(fieldName, dataSchema));
+                } else {
+                    dr.set(i, valuesNotInDataCols.get(fieldName));
+                }
+                i++;
+            }
+
+            currentHCatRecord = dr;
+            return true;
+        }
+
+        return false;
+    }
+
+    /* (non-Javadoc)
+    * @see org.apache.hadoop.mapreduce.RecordReader#close()
+    */
+    @Override
     public void close() throws IOException {
         baseRecordReader.close();
     }
 
-  /**
-   * Tracks number of of errors in input and throws a Runtime exception
-   * if the rate of errors crosses a limit.
-   * <br/>
-   * The intention is to skip over very rare file corruption or incorrect
-   * input, but catch programmer errors (incorrect format, or incorrect
-   * deserializers etc).
-   *
-   * This class was largely copied from Elephant-Bird (thanks @rangadi!)
-   * https://github.com/kevinweil/elephant-bird/blob/master/core/src/main/java/com/twitter/elephantbird/mapreduce/input/LzoRecordReader.java
-   */
-  static class InputErrorTracker {
-    long numRecords;
-    long numErrors;
+    /**
+     * Tracks the number of errors in the input and throws a RuntimeException
+     * if the rate of errors crosses a limit.
+     * <br/>
+     * The intention is to skip over very rare file corruption or incorrect
+     * input, but catch programmer errors (incorrect format, or incorrect
+     * deserializers etc).
+     *
+     * This class was largely copied from Elephant-Bird (thanks @rangadi!)
+     * https://github.com/kevinweil/elephant-bird/blob/master/core/src/main/java/com/twitter/elephantbird/mapreduce/input/LzoRecordReader.java
+     */
+    static class InputErrorTracker {
+        long numRecords;
+        long numErrors;
 
-    double errorThreshold; // max fraction of errors allowed
-    long minErrors; // throw error only after this many errors
+        double errorThreshold; // max fraction of errors allowed
+        long minErrors; // throw error only after this many errors
 
-    InputErrorTracker(Configuration conf) {
-      errorThreshold = conf.getFloat(HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY,
-          HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_DEFAULT);
-      minErrors = conf.getLong(HCatConstants.HCAT_INPUT_BAD_RECORD_MIN_KEY,
-          HCatConstants.HCAT_INPUT_BAD_RECORD_MIN_DEFAULT);
-      numRecords = 0;
-      numErrors = 0;
+        InputErrorTracker(Configuration conf) {
+            errorThreshold = conf.getFloat(HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY,
+                HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_DEFAULT);
+            minErrors = conf.getLong(HCatConstants.HCAT_INPUT_BAD_RECORD_MIN_KEY,
+                HCatConstants.HCAT_INPUT_BAD_RECORD_MIN_DEFAULT);
+            numRecords = 0;
+            numErrors = 0;
+        }
+
+        void incRecords() {
+            numRecords++;
+        }
+
+        void incErrors(Throwable cause) {
+            numErrors++;
+            if (numErrors > numRecords) {
+                // incorrect use of this class
+                throw new RuntimeException("Forgot to invoke incRecords()?");
+            }
+
+            if (cause == null) {
+                cause = new Exception("Unknown error");
+            }
+
+            if (errorThreshold <= 0) { // no errors are tolerated
+                throw new RuntimeException("error while reading input records", cause);
+            }
+
+            LOG.warn("Error while reading an input record ("
+                + numErrors + " out of " + numRecords + " so far ): ", cause);
+
+            double errRate = numErrors / (double) numRecords;
+
+            // will always excuse the first error. We can decide if single
+            // error crosses threshold inside close() if we want to.
+            if (numErrors >= minErrors && errRate > errorThreshold) {
+                LOG.error(numErrors + " out of " + numRecords
+                    + " crosses configured threshold (" + errorThreshold + ")");
+                throw new RuntimeException("error rate while reading input records crossed threshold", cause);
+            }
+        }
     }
-
-    void incRecords() {
-      numRecords++;
-    }
-
-    void incErrors(Throwable cause) {
-      numErrors++;
-      if (numErrors > numRecords) {
-        // incorrect use of this class
-        throw new RuntimeException("Forgot to invoke incRecords()?");
-      }
-
-      if (cause == null) {
-        cause = new Exception("Unknown error");
-      }
-
-      if (errorThreshold <= 0) { // no errors are tolerated
-        throw new RuntimeException("error while reading input records", cause);
-      }
-
-      LOG.warn("Error while reading an input record ("
-          + numErrors + " out of " + numRecords + " so far ): ", cause);
-
-      double errRate = numErrors / (double) numRecords;
-
-      // will always excuse the first error. We can decide if single
-      // error crosses threshold inside close() if we want to.
-      if (numErrors >= minErrors && errRate > errorThreshold) {
-        LOG.error(numErrors + " out of " + numRecords
-            + " crosses configured threshold (" + errorThreshold + ")");
-        throw new RuntimeException("error rate while reading input records crossed threshold", cause);
-      }
-    }
-  }
 }
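
For context, the InputErrorTracker moved above fails a read only when two conditions hold at once: at least minErrors bad records have been seen, and the running error rate exceeds errorThreshold. A tiny standalone illustration of that check, with made-up numbers:

public class ErrorRateCheck {

    // Mirrors the shape of the threshold test: both a minimum error count
    // and an error-rate ceiling must be crossed before giving up.
    static boolean shouldFail(long numErrors, long numRecords,
                              long minErrors, double errorThreshold) {
        double errRate = numErrors / (double) numRecords;
        return numErrors >= minErrors && errRate > errorThreshold;
    }

    public static void main(String[] args) {
        // 3 errors in 1000 records, minimum 5 errors, 1% threshold: keep going.
        System.out.println(shouldFail(3, 1000, 5, 0.01));  // false
        // 12 errors in 1000 records crosses both limits: fail.
        System.out.println(shouldFail(12, 1000, 5, 0.01)); // true
    }
}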
diff --git a/src/java/org/apache/hcatalog/mapreduce/HCatSplit.java b/src/java/org/apache/hcatalog/mapreduce/HCatSplit.java
index c510b9c..c92d665 100644
--- a/src/java/org/apache/hcatalog/mapreduce/HCatSplit.java
+++ b/src/java/org/apache/hcatalog/mapreduce/HCatSplit.java
@@ -33,7 +33,7 @@
 
 /** The HCatSplit wrapper around the InputSplit returned by the underlying InputFormat */
 public class HCatSplit extends InputSplit
-  implements Writable,org.apache.hadoop.mapred.InputSplit {
+    implements Writable, org.apache.hadoop.mapred.InputSplit {
 
     private static final Logger LOG = LoggerFactory.getLogger(HCatSplit.class);
     /** The partition info for the split. */
@@ -61,13 +61,13 @@
      * @param tableSchema the table level schema
      */
     public HCatSplit(PartInfo partitionInfo,
-        org.apache.hadoop.mapred.InputSplit baseMapRedSplit,
-        HCatSchema tableSchema) {
+                     org.apache.hadoop.mapred.InputSplit baseMapRedSplit,
+                     HCatSchema tableSchema) {
 
-      this.partitionInfo = partitionInfo;
-      // dataSchema can be obtained from partitionInfo.getPartitionSchema()
-      this.baseMapRedSplit = baseMapRedSplit;
-      this.tableSchema = tableSchema;
+        this.partitionInfo = partitionInfo;
+        // dataSchema can be obtained from partitionInfo.getPartitionSchema()
+        this.baseMapRedSplit = baseMapRedSplit;
+        this.tableSchema = tableSchema;
     }
 
     /**
@@ -99,7 +99,7 @@
      * @return the table schema
      */
     public HCatSchema getTableSchema() {
-      return this.tableSchema;
+        return this.tableSchema;
     }
 
     /* (non-Javadoc)
@@ -108,9 +108,9 @@
     @Override
     public long getLength() {
         try {
-          return baseMapRedSplit.getLength();
+            return baseMapRedSplit.getLength();
         } catch (IOException e) {
-          LOG.warn("Exception in HCatSplit",e);
+            LOG.warn("Exception in HCatSplit", e);
         }
         return 0; // we errored
     }
@@ -121,9 +121,9 @@
     @Override
     public String[] getLocations() {
         try {
-          return baseMapRedSplit.getLocations();
+            return baseMapRedSplit.getLocations();
         } catch (IOException e) {
-            LOG.warn("Exception in HCatSplit",e);
+            LOG.warn("Exception in HCatSplit", e);
         }
         return new String[0]; // we errored
     }
@@ -139,23 +139,23 @@
 
         String baseSplitClassName = WritableUtils.readString(input);
         org.apache.hadoop.mapred.InputSplit split;
-        try{
+        try {
             Class<? extends org.apache.hadoop.mapred.InputSplit> splitClass =
                 (Class<? extends org.apache.hadoop.mapred.InputSplit>) Class.forName(baseSplitClassName);
 
             //Class.forName().newInstance() does not work if the underlying
             //InputSplit has package visibility
             Constructor<? extends org.apache.hadoop.mapred.InputSplit>
-              constructor =
+                constructor =
                 splitClass.getDeclaredConstructor(new Class[]{});
             constructor.setAccessible(true);
 
             split = constructor.newInstance();
             // read baseSplit from input
-            ((Writable)split).readFields(input);
+            ((Writable) split).readFields(input);
             this.baseMapRedSplit = split;
-        }catch(Exception e){
-            throw new IOException ("Exception from " + baseSplitClassName, e);
+        } catch (Exception e) {
+            throw new IOException("Exception from " + baseSplitClassName, e);
         }
 
         String tableSchemaString = WritableUtils.readString(input);
@@ -173,7 +173,7 @@
         WritableUtils.writeString(output, partitionInfoString);
 
         WritableUtils.writeString(output, baseMapRedSplit.getClass().getName());
-        Writable baseSplitWritable = (Writable)baseMapRedSplit;
+        Writable baseSplitWritable = (Writable) baseMapRedSplit;
         //write  baseSplit into output
         baseSplitWritable.write(output);
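
HCatSplit serializes the wrapped split as its class name followed by the split's own Writable bytes; readFields therefore has to re-create an instance of a class whose no-arg constructor may be package private, which is why it fetches the declared constructor and calls setAccessible(true) instead of relying on Class.newInstance(). A compact sketch of just that reflection step (helper class and method names are made up):

    import java.lang.reflect.Constructor;

    // Illustrative helper showing the reflective instantiation pattern used by readFields.
    final class SplitInstantiationSketch {

        static Object newInstanceOf(String className) throws Exception {
            Class<?> clazz = Class.forName(className);
            // Class.newInstance() would fail for a non-public constructor, so fetch it
            // explicitly and make it accessible before instantiating.
            Constructor<?> ctor = clazz.getDeclaredConstructor();
            ctor.setAccessible(true);
            return ctor.newInstance();
        }
    }
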
 
diff --git a/src/java/org/apache/hcatalog/mapreduce/HCatStorageHandler.java b/src/java/org/apache/hcatalog/mapreduce/HCatStorageHandler.java
index d1ebf63..1ebcaa4 100644
--- a/src/java/org/apache/hcatalog/mapreduce/HCatStorageHandler.java
+++ b/src/java/org/apache/hcatalog/mapreduce/HCatStorageHandler.java
@@ -34,6 +34,7 @@
 public abstract class HCatStorageHandler implements HiveStorageHandler {
 
     //TODO move this to HiveStorageHandler
+
     /**
      * This method is called to allow the StorageHandlers the chance
      * to populate the JobContext.getConfiguration() with properties that
@@ -53,6 +54,7 @@
     public abstract void configureInputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties);
 
     //TODO move this to HiveStorageHandler
+
     /**
      * This method is called to allow the StorageHandlers the chance
      * to populate the JobContext.getConfiguration() with properties that
@@ -72,46 +74,46 @@
     public abstract void configureOutputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties);
 
     /**
-     * 
-     * 
+     *
+     *
      * @return authorization provider
      * @throws HiveException
      */
     public abstract HiveAuthorizationProvider getAuthorizationProvider()
-            throws HiveException;
-    
+        throws HiveException;
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see org.apache.hadoop.hive.ql.metadata.HiveStorageHandler#
-     * configureTableJobProperties(org.apache.hadoop.hive.ql.plan.TableDesc,
-     * java.util.Map)
-     */
+     * (non-Javadoc)
+     *
+     * @see org.apache.hadoop.hive.ql.metadata.HiveStorageHandler#
+     * configureTableJobProperties(org.apache.hadoop.hive.ql.plan.TableDesc,
+     * java.util.Map)
+     */
     @Override
     @Deprecated
     public final void configureTableJobProperties(TableDesc tableDesc,
-            Map<String, String> jobProperties) {
+                                                  Map<String, String> jobProperties) {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see org.apache.hadoop.conf.Configurable#getConf()
-     */
+     * (non-Javadoc)
+     *
+     * @see org.apache.hadoop.conf.Configurable#getConf()
+     */
     @Override
     public abstract Configuration getConf();
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see org.apache.hadoop.conf.Configurable#setConf(org.apache.hadoop.conf.
-     * Configuration)
-     */
+     * (non-Javadoc)
+     *
+     * @see org.apache.hadoop.conf.Configurable#setConf(org.apache.hadoop.conf.
+     * Configuration)
+     */
     @Override
     public abstract void setConf(Configuration conf);
 
     OutputFormatContainer getOutputFormatContainer(OutputFormat outputFormat) {
         return new DefaultOutputFormatContainer(outputFormat);
     }
-    
+
 }
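
The two abstract configureInputJobProperties/configureOutputJobProperties hooks exist so a handler can copy whatever it needs at task runtime into the supplied jobProperties map, which HCatalog later folds into the job's Configuration. A toy illustration of that contract only (the class, property keys, and values below are invented; a real handler extends HCatStorageHandler and also satisfies the rest of the HiveStorageHandler interface):

    import java.util.Map;

    // Toy illustration of the job-properties hand-off; not a real storage handler.
    class ToyJobPropertiesSketch {

        void configureInputJobProperties(Map<String, String> tableProperties,
                                         Map<String, String> jobProperties) {
            // Copy the table-level settings the reader will need at runtime.
            String location = tableProperties.get("toy.table.location");
            if (location != null) {
                jobProperties.put("toy.input.location", location);
            }
        }

        void configureOutputJobProperties(Map<String, String> tableProperties,
                                          Map<String, String> jobProperties) {
            // Writers usually need the same hand-off, plus output-only knobs.
            String compress = tableProperties.get("toy.table.compress");
            jobProperties.put("toy.output.compress", compress == null ? "false" : compress);
        }
    }
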
diff --git a/src/java/org/apache/hcatalog/mapreduce/HCatTableInfo.java b/src/java/org/apache/hcatalog/mapreduce/HCatTableInfo.java
index 11f93e9..8e69b7e 100644
--- a/src/java/org/apache/hcatalog/mapreduce/HCatTableInfo.java
+++ b/src/java/org/apache/hcatalog/mapreduce/HCatTableInfo.java
@@ -35,150 +35,152 @@
 public class HCatTableInfo implements Serializable {
 
 
-  private static final long serialVersionUID = 1L;
+    private static final long serialVersionUID = 1L;
 
-  /** The db and table names */
-  private final String databaseName;
-  private final String tableName;
+    /** The db and table names */
+    private final String databaseName;
+    private final String tableName;
 
-  /** The table schema. */
-  private final HCatSchema dataColumns;
-  private final HCatSchema partitionColumns;
+    /** The table schema. */
+    private final HCatSchema dataColumns;
+    private final HCatSchema partitionColumns;
 
-  /** The table being written to */
-  private final Table table;
+    /** The table being written to */
+    private final Table table;
 
-  /** The storer info */
-  private StorerInfo storerInfo;
+    /** The storer info */
+    private StorerInfo storerInfo;
 
-  /**
-   * Initializes a new HCatTableInfo instance to be used with {@link HCatInputFormat}
-   * for reading data from a table.
-   * work with hadoop security, the kerberos principal name of the server - else null
-   * The principal name should be of the form:
-   * <servicename>/_HOST@<realm> like "hcat/_HOST@myrealm.com"
-   * The special string _HOST will be replaced automatically with the correct host name
-   * @param databaseName the db name
-   * @param tableName the table name
-   * @param dataColumns schema of columns which contain data
-   * @param partitionColumns schema of partition columns
-   * @param storerInfo information about storage descriptor
-   * @param table hive metastore table class
-   */
-  HCatTableInfo(
-      String databaseName,
-      String tableName,
-      HCatSchema dataColumns,
-      HCatSchema partitionColumns,
-      StorerInfo storerInfo,
-      Table table) {
-    this.databaseName = (databaseName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
-    this.tableName = tableName;
-    this.dataColumns = dataColumns;
-    this.table = table;
-    this.storerInfo = storerInfo;
-    this.partitionColumns = partitionColumns;
-  }
+    /**
+     * Initializes a new HCatTableInfo instance to be used with {@link HCatInputFormat}
+     * for reading data from a table.
+     * @param databaseName the db name
+     * @param tableName the table name
+     * @param dataColumns schema of columns which contain data
+     * @param partitionColumns schema of partition columns
+     * @param storerInfo information about storage descriptor
+     * @param table hive metastore table class
+     */
+    HCatTableInfo(
+        String databaseName,
+        String tableName,
+        HCatSchema dataColumns,
+        HCatSchema partitionColumns,
+        StorerInfo storerInfo,
+        Table table) {
+        this.databaseName = (databaseName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
+        this.tableName = tableName;
+        this.dataColumns = dataColumns;
+        this.table = table;
+        this.storerInfo = storerInfo;
+        this.partitionColumns = partitionColumns;
+    }
 
-  /**
-   * Gets the value of databaseName
-   * @return the databaseName
-   */
-  public String getDatabaseName() {
-    return databaseName;
-  }
+    /**
+     * Gets the value of databaseName
+     * @return the databaseName
+     */
+    public String getDatabaseName() {
+        return databaseName;
+    }
 
-  /**
-   * Gets the value of tableName
-   * @return the tableName
-   */
-  public String getTableName() {
-    return tableName;
-  }
+    /**
+     * Gets the value of tableName
+     * @return the tableName
+     */
+    public String getTableName() {
+        return tableName;
+    }
 
-  /**
-   * @return return schema of data columns as defined in meta store
-   */
-  public HCatSchema getDataColumns() {
-    return dataColumns;
-  }
+    /**
+     * @return return schema of data columns as defined in meta store
+     */
+    public HCatSchema getDataColumns() {
+        return dataColumns;
+    }
 
-  /**
-   * @return schema of partition columns
-   */
-  public HCatSchema getPartitionColumns() {
-    return partitionColumns;
-  }
+    /**
+     * @return schema of partition columns
+     */
+    public HCatSchema getPartitionColumns() {
+        return partitionColumns;
+    }
 
-  /**
-   * @return the storerInfo
-   */
-  public StorerInfo getStorerInfo() {
-    return storerInfo;
-  }
+    /**
+     * @return the storerInfo
+     */
+    public StorerInfo getStorerInfo() {
+        return storerInfo;
+    }
 
-  public String getTableLocation() {
-      return table.getSd().getLocation();
-  }
+    public String getTableLocation() {
+        return table.getSd().getLocation();
+    }
 
-  /**
-   * minimize dependency on hive classes so this is package private
-   * this should eventually no longer be used
-   * @return hive metastore representation of table
-   */
-  Table getTable() {
-    return table;
-  }
+    /**
+     * minimize dependency on hive classes so this is package private
+     * this should eventually no longer be used
+     * @return hive metastore representation of table
+     */
+    Table getTable() {
+        return table;
+    }
 
-  /**
-   * create an HCatTableInfo instance from the supplied Hive Table instance
-   * @param table to create an instance from
-   * @return HCatTableInfo
-   * @throws IOException
-   */
-  static HCatTableInfo valueOf(Table table) throws IOException {
-    // Explicitly use {@link org.apache.hadoop.hive.ql.metadata.Table} when getting the schema,
-    // but store @{link org.apache.hadoop.hive.metastore.api.Table} as this class is serialized
-    // into the job conf.
-    org.apache.hadoop.hive.ql.metadata.Table mTable =
-        new org.apache.hadoop.hive.ql.metadata.Table(table);
-    HCatSchema schema = HCatUtil.extractSchema(mTable);
-    StorerInfo storerInfo =
-        InternalUtil.extractStorerInfo(table.getSd(), table.getParameters());
-    HCatSchema partitionColumns = HCatUtil.getPartitionColumns(mTable);
-    return new HCatTableInfo(table.getDbName(), table.getTableName(), schema,
-        partitionColumns, storerInfo, table);
-  }
+    /**
+     * create an HCatTableInfo instance from the supplied Hive Table instance
+     * @param table to create an instance from
+     * @return HCatTableInfo
+     * @throws IOException
+     */
+    static HCatTableInfo valueOf(Table table) throws IOException {
+        // Explicitly use {@link org.apache.hadoop.hive.ql.metadata.Table} when getting the schema,
+        // but store @{link org.apache.hadoop.hive.metastore.api.Table} as this class is serialized
+        // into the job conf.
+        org.apache.hadoop.hive.ql.metadata.Table mTable =
+            new org.apache.hadoop.hive.ql.metadata.Table(table);
+        HCatSchema schema = HCatUtil.extractSchema(mTable);
+        StorerInfo storerInfo =
+            InternalUtil.extractStorerInfo(table.getSd(), table.getParameters());
+        HCatSchema partitionColumns = HCatUtil.getPartitionColumns(mTable);
+        return new HCatTableInfo(table.getDbName(), table.getTableName(), schema,
+            partitionColumns, storerInfo, table);
+    }
 
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
 
-    HCatTableInfo tableInfo = (HCatTableInfo) o;
+        HCatTableInfo tableInfo = (HCatTableInfo) o;
 
-    if (dataColumns != null ? !dataColumns.equals(tableInfo.dataColumns) : tableInfo.dataColumns != null) return false;
-    if (databaseName != null ? !databaseName.equals(tableInfo.databaseName) : tableInfo.databaseName != null) return false;
-    if (partitionColumns != null ? !partitionColumns.equals(tableInfo.partitionColumns) : tableInfo.partitionColumns != null)
-      return false;
-    if (storerInfo != null ? !storerInfo.equals(tableInfo.storerInfo) : tableInfo.storerInfo != null) return false;
-    if (table != null ? !table.equals(tableInfo.table) : tableInfo.table != null) return false;
-    if (tableName != null ? !tableName.equals(tableInfo.tableName) : tableInfo.tableName != null) return false;
+        if (dataColumns != null ? !dataColumns.equals(tableInfo.dataColumns) : tableInfo.dataColumns != null)
+            return false;
+        if (databaseName != null ? !databaseName.equals(tableInfo.databaseName) : tableInfo.databaseName != null)
+            return false;
+        if (partitionColumns != null ? !partitionColumns.equals(tableInfo.partitionColumns) : tableInfo.partitionColumns != null)
+            return false;
+        if (storerInfo != null ? !storerInfo.equals(tableInfo.storerInfo) : tableInfo.storerInfo != null) return false;
+        if (table != null ? !table.equals(tableInfo.table) : tableInfo.table != null) return false;
+        if (tableName != null ? !tableName.equals(tableInfo.tableName) : tableInfo.tableName != null) return false;
 
-    return true;
-  }
+        return true;
+    }
 
 
-  @Override
-  public int hashCode() {
-    int result = databaseName != null ? databaseName.hashCode() : 0;
-    result = 31 * result + (tableName != null ? tableName.hashCode() : 0);
-    result = 31 * result + (dataColumns != null ? dataColumns.hashCode() : 0);
-    result = 31 * result + (partitionColumns != null ? partitionColumns.hashCode() : 0);
-    result = 31 * result + (table != null ? table.hashCode() : 0);
-    result = 31 * result + (storerInfo != null ? storerInfo.hashCode() : 0);
-    return result;
-  }
+    @Override
+    public int hashCode() {
+        int result = databaseName != null ? databaseName.hashCode() : 0;
+        result = 31 * result + (tableName != null ? tableName.hashCode() : 0);
+        result = 31 * result + (dataColumns != null ? dataColumns.hashCode() : 0);
+        result = 31 * result + (partitionColumns != null ? partitionColumns.hashCode() : 0);
+        result = 31 * result + (table != null ? table.hashCode() : 0);
+        result = 31 * result + (storerInfo != null ? storerInfo.hashCode() : 0);
+        return result;
+    }
 
 }
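
The equals/hashCode pair above is the standard hand-rolled null-safe form over all six fields. For reference, the same contract written with the java.util.Objects helpers available from Java 7 onward looks like the toy class below (ToyTableInfo and its two fields are stand-ins, not HCatalog code):

    import java.util.Objects;

    // Stand-in class demonstrating the equivalent null-safe equals/hashCode on Java 7+.
    final class ToyTableInfo {
        private final String databaseName;
        private final String tableName;

        ToyTableInfo(String databaseName, String tableName) {
            this.databaseName = databaseName;
            this.tableName = tableName;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            ToyTableInfo that = (ToyTableInfo) o;
            return Objects.equals(databaseName, that.databaseName)
                && Objects.equals(tableName, that.tableName);
        }

        @Override
        public int hashCode() {
            return Objects.hash(databaseName, tableName);
        }
    }
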
 
diff --git a/src/java/org/apache/hcatalog/mapreduce/InitializeInput.java b/src/java/org/apache/hcatalog/mapreduce/InitializeInput.java
index f14e359..ce65623 100644
--- a/src/java/org/apache/hcatalog/mapreduce/InitializeInput.java
+++ b/src/java/org/apache/hcatalog/mapreduce/InitializeInput.java
@@ -50,136 +50,136 @@
 
     private static final Logger LOG = LoggerFactory.getLogger(InitializeInput.class);
 
-  /**
-   * Set the input to use for the Job. This queries the metadata server with the specified
-   * partition predicates, gets the matching partitions, and puts the information in the job
-   * configuration object.
-   *
-   * To ensure a known InputJobInfo state, only the database name, table name, filter, and
-   * properties are preserved. All other modification from the given InputJobInfo are discarded.
-   *
-   * After calling setInput, InputJobInfo can be retrieved from the job configuration as follows:
-   * {code}
-   * InputJobInfo inputInfo = (InputJobInfo) HCatUtil.deserialize(
-   *     job.getConfiguration().get(HCatConstants.HCAT_KEY_JOB_INFO));
-   * {code}
-   *
-   * @param job the job object
-   * @param theirInputJobInfo information on the Input to read
-   * @throws Exception
-   */
-  public static void setInput(Job job, InputJobInfo theirInputJobInfo) throws Exception {
-    InputJobInfo inputJobInfo = InputJobInfo.create(
-        theirInputJobInfo.getDatabaseName(),
-        theirInputJobInfo.getTableName(),
-        theirInputJobInfo.getFilter());
-    inputJobInfo.getProperties().putAll(theirInputJobInfo.getProperties());
-    job.getConfiguration().set(
-        HCatConstants.HCAT_KEY_JOB_INFO,
-        HCatUtil.serialize(getInputJobInfo(job, inputJobInfo, null)));
-  }
+    /**
+     * Set the input to use for the Job. This queries the metadata server with the specified
+     * partition predicates, gets the matching partitions, and puts the information in the job
+     * configuration object.
+     *
+     * To ensure a known InputJobInfo state, only the database name, table name, filter, and
+     * properties are preserved. All other modification from the given InputJobInfo are discarded.
+     *
+     * After calling setInput, InputJobInfo can be retrieved from the job configuration as follows:
+     * {code}
+     * InputJobInfo inputInfo = (InputJobInfo) HCatUtil.deserialize(
+     *     job.getConfiguration().get(HCatConstants.HCAT_KEY_JOB_INFO));
+     * {code}
+     *
+     * @param job the job object
+     * @param theirInputJobInfo information on the Input to read
+     * @throws Exception
+     */
+    public static void setInput(Job job, InputJobInfo theirInputJobInfo) throws Exception {
+        InputJobInfo inputJobInfo = InputJobInfo.create(
+            theirInputJobInfo.getDatabaseName(),
+            theirInputJobInfo.getTableName(),
+            theirInputJobInfo.getFilter());
+        inputJobInfo.getProperties().putAll(theirInputJobInfo.getProperties());
+        job.getConfiguration().set(
+            HCatConstants.HCAT_KEY_JOB_INFO,
+            HCatUtil.serialize(getInputJobInfo(job, inputJobInfo, null)));
+    }
 
-  /**
-   * Returns the given InputJobInfo after populating with data queried from the metadata service.
-   */
-  private static InputJobInfo getInputJobInfo(
-      Job job, InputJobInfo inputJobInfo, String locationFilter) throws Exception {
+    /**
+     * Returns the given InputJobInfo after populating with data queried from the metadata service.
+     */
+    private static InputJobInfo getInputJobInfo(
+        Job job, InputJobInfo inputJobInfo, String locationFilter) throws Exception {
 
-    HiveMetaStoreClient client = null;
-    HiveConf hiveConf = null;
-    try {
-      if (job != null){
-        hiveConf = HCatUtil.getHiveConf(job.getConfiguration());
-      } else {
-        hiveConf = new HiveConf(HCatInputFormat.class);
-      }
-      client = HCatUtil.getHiveClient(hiveConf);
-      Table table = HCatUtil.getTable(client, inputJobInfo.getDatabaseName(),
-          inputJobInfo.getTableName());
+        HiveMetaStoreClient client = null;
+        HiveConf hiveConf = null;
+        try {
+            if (job != null) {
+                hiveConf = HCatUtil.getHiveConf(job.getConfiguration());
+            } else {
+                hiveConf = new HiveConf(HCatInputFormat.class);
+            }
+            client = HCatUtil.getHiveClient(hiveConf);
+            Table table = HCatUtil.getTable(client, inputJobInfo.getDatabaseName(),
+                inputJobInfo.getTableName());
 
-      List<PartInfo> partInfoList = new ArrayList<PartInfo>();
+            List<PartInfo> partInfoList = new ArrayList<PartInfo>();
 
-      inputJobInfo.setTableInfo(HCatTableInfo.valueOf(table.getTTable()));
-      if( table.getPartitionKeys().size() != 0 ) {
-        //Partitioned table
-        List<Partition> parts = client.listPartitionsByFilter(inputJobInfo.getDatabaseName(),
-                                                              inputJobInfo.getTableName(),
-                                                              inputJobInfo.getFilter(),
-                                                              (short) -1);
+            inputJobInfo.setTableInfo(HCatTableInfo.valueOf(table.getTTable()));
+            if (table.getPartitionKeys().size() != 0) {
+                //Partitioned table
+                List<Partition> parts = client.listPartitionsByFilter(inputJobInfo.getDatabaseName(),
+                    inputJobInfo.getTableName(),
+                    inputJobInfo.getFilter(),
+                    (short) -1);
 
-        // Default to 100,000 partitions if hive.metastore.maxpartition is not defined
-        int maxPart = hiveConf.getInt("hcat.metastore.maxpartitions", 100000);
-        if (parts != null && parts.size() > maxPart) {
-          throw new HCatException(ErrorType.ERROR_EXCEED_MAXPART, "total number of partitions is " + parts.size());
+                // Default to 100,000 partitions if hive.metastore.maxpartition is not defined
+                int maxPart = hiveConf.getInt("hcat.metastore.maxpartitions", 100000);
+                if (parts != null && parts.size() > maxPart) {
+                    throw new HCatException(ErrorType.ERROR_EXCEED_MAXPART, "total number of partitions is " + parts.size());
+                }
+
+                // populate partition info
+                for (Partition ptn : parts) {
+                    HCatSchema schema = HCatUtil.extractSchema(
+                        new org.apache.hadoop.hive.ql.metadata.Partition(table, ptn));
+                    PartInfo partInfo = extractPartInfo(schema, ptn.getSd(),
+                        ptn.getParameters(), job.getConfiguration(), inputJobInfo);
+                    partInfo.setPartitionValues(createPtnKeyValueMap(table, ptn));
+                    partInfoList.add(partInfo);
+                }
+
+            } else {
+                //Non partitioned table
+                HCatSchema schema = HCatUtil.extractSchema(table);
+                PartInfo partInfo = extractPartInfo(schema, table.getTTable().getSd(),
+                    table.getParameters(), job.getConfiguration(), inputJobInfo);
+                partInfo.setPartitionValues(new HashMap<String, String>());
+                partInfoList.add(partInfo);
+            }
+            inputJobInfo.setPartitions(partInfoList);
+
+            return inputJobInfo;
+        } finally {
+            HCatUtil.closeHiveClientQuietly(client);
         }
 
-        // populate partition info
-        for (Partition ptn : parts){
-          HCatSchema schema = HCatUtil.extractSchema(
-              new org.apache.hadoop.hive.ql.metadata.Partition(table, ptn));
-          PartInfo partInfo = extractPartInfo(schema, ptn.getSd(),
-              ptn.getParameters(), job.getConfiguration(), inputJobInfo);
-          partInfo.setPartitionValues(createPtnKeyValueMap(table, ptn));
-          partInfoList.add(partInfo);
+    }
+
+    private static Map<String, String> createPtnKeyValueMap(Table table, Partition ptn) throws IOException {
+        List<String> values = ptn.getValues();
+        if (values.size() != table.getPartitionKeys().size()) {
+            throw new IOException("Partition values in partition inconsistent with table definition, table "
+                + table.getTableName() + " has "
+                + table.getPartitionKeys().size()
+                + " partition keys, partition has " + values.size() + "partition values");
         }
 
-      }else{
-        //Non partitioned table
-        HCatSchema schema = HCatUtil.extractSchema(table);
-        PartInfo partInfo = extractPartInfo(schema, table.getTTable().getSd(),
-            table.getParameters(), job.getConfiguration(), inputJobInfo);
-        partInfo.setPartitionValues(new HashMap<String,String>());
-        partInfoList.add(partInfo);
-      }
-      inputJobInfo.setPartitions(partInfoList);
+        Map<String, String> ptnKeyValues = new HashMap<String, String>();
 
-      return inputJobInfo;
-    } finally {
-      HCatUtil.closeHiveClientQuietly(client);
+        int i = 0;
+        for (FieldSchema schema : table.getPartitionKeys()) {
+            // CONCERN : the way this mapping goes, the order *needs* to be preserved for table.getPartitionKeys() and ptn.getValues()
+            ptnKeyValues.put(schema.getName().toLowerCase(), values.get(i));
+            i++;
+        }
+
+        return ptnKeyValues;
     }
 
-  }
+    private static PartInfo extractPartInfo(HCatSchema schema, StorageDescriptor sd,
+                                            Map<String, String> parameters, Configuration conf,
+                                            InputJobInfo inputJobInfo) throws IOException {
 
-  private static Map<String, String> createPtnKeyValueMap(Table table, Partition ptn) throws IOException{
-    List<String> values = ptn.getValues();
-    if( values.size() != table.getPartitionKeys().size() ) {
-      throw new IOException("Partition values in partition inconsistent with table definition, table "
-          + table.getTableName() + " has "
-          + table.getPartitionKeys().size()
-          + " partition keys, partition has " + values.size() + "partition values" );
+        StorerInfo storerInfo = InternalUtil.extractStorerInfo(sd, parameters);
+
+        Properties hcatProperties = new Properties();
+        HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(conf, storerInfo);
+
+        // copy the properties from storageHandler to jobProperties
+        Map<String, String> jobProperties = HCatUtil.getInputJobProperties(storageHandler, inputJobInfo);
+
+        for (String key : parameters.keySet()) {
+            hcatProperties.put(key, parameters.get(key));
+        }
+        // FIXME
+        // Bloating partinfo with inputJobInfo is not good
+        return new PartInfo(schema, storageHandler, sd.getLocation(),
+            hcatProperties, jobProperties, inputJobInfo.getTableInfo());
     }
 
-    Map<String,String> ptnKeyValues = new HashMap<String,String>();
-
-    int i = 0;
-    for(FieldSchema schema : table.getPartitionKeys()) {
-      // CONCERN : the way this mapping goes, the order *needs* to be preserved for table.getPartitionKeys() and ptn.getValues()
-      ptnKeyValues.put(schema.getName().toLowerCase(), values.get(i));
-      i++;
-    }
-
-    return ptnKeyValues;
-  }
-
-  private static PartInfo extractPartInfo(HCatSchema schema, StorageDescriptor sd,
-      Map<String,String> parameters, Configuration conf,
-      InputJobInfo inputJobInfo) throws IOException{
-
-    StorerInfo storerInfo = InternalUtil.extractStorerInfo(sd,parameters);
-
-    Properties hcatProperties = new Properties();
-    HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(conf, storerInfo);
-
-    // copy the properties from storageHandler to jobProperties
-    Map<String, String>jobProperties = HCatUtil.getInputJobProperties(storageHandler, inputJobInfo);
-
-    for (String key : parameters.keySet()){
-        hcatProperties.put(key, parameters.get(key));
-    }
-    // FIXME
-    // Bloating partinfo with inputJobInfo is not good
-    return new PartInfo(schema, storageHandler, sd.getLocation(),
-        hcatProperties, jobProperties, inputJobInfo.getTableInfo());
-  }
-
 }
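
Putting the setInput contract above into a rough end-to-end sketch: only the database name, table name, filter, and properties of the supplied InputJobInfo survive, and the partition-populated copy is read back out of the job configuration. The table name and filter below are made up, a reachable metastore is assumed, and the org.apache.hcatalog.common package for HCatUtil and HCatConstants is an assumption; in practice the call usually goes through HCatInputFormat's setInput wrapper rather than this class directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hcatalog.common.HCatConstants;
    import org.apache.hcatalog.common.HCatUtil;
    import org.apache.hcatalog.mapreduce.InitializeInput;
    import org.apache.hcatalog.mapreduce.InputJobInfo;

    // Rough usage sketch; requires a running Hive metastore and an existing table.
    public class SetInputSketch {
        public static void main(String[] args) throws Exception {
            Job job = new Job(new Configuration());
            InitializeInput.setInput(job,
                InputJobInfo.create("default", "web_logs", "dt='2012-09-01'"));

            // Read back the InputJobInfo that setInput serialized into the job conf.
            InputJobInfo populated = (InputJobInfo) HCatUtil.deserialize(
                job.getConfiguration().get(HCatConstants.HCAT_KEY_JOB_INFO));
            System.out.println(populated.getPartitions().size() + " partitions matched");
        }
    }
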
diff --git a/src/java/org/apache/hcatalog/mapreduce/InputJobInfo.java b/src/java/org/apache/hcatalog/mapreduce/InputJobInfo.java
index f39e6eb..f2b3890 100644
--- a/src/java/org/apache/hcatalog/mapreduce/InputJobInfo.java
+++ b/src/java/org/apache/hcatalog/mapreduce/InputJobInfo.java
@@ -24,114 +24,114 @@
 import java.util.Properties;
 
 /** The class used to serialize and store the information read from the metadata server */
-public class InputJobInfo implements Serializable{
+public class InputJobInfo implements Serializable {
 
-  /** The serialization version */
-  private static final long serialVersionUID = 1L;
+    /** The serialization version */
+    private static final long serialVersionUID = 1L;
 
-  /** The db and table names. */
-  private final String databaseName;
-  private final String tableName;
+    /** The db and table names. */
+    private final String databaseName;
+    private final String tableName;
 
-  /** meta information of the table to be read from */
-  private HCatTableInfo tableInfo;
+    /** meta information of the table to be read from */
+    private HCatTableInfo tableInfo;
 
-  /** The partition filter */
-  private String filter;
+    /** The partition filter */
+    private String filter;
 
-  /** The list of partitions matching the filter. */
-  private List<PartInfo> partitions;
+    /** The list of partitions matching the filter. */
+    private List<PartInfo> partitions;
 
-  /** implementation specific job properties */
-  private Properties properties;
+    /** implementation specific job properties */
+    private Properties properties;
 
-  /**
-   * Initializes a new InputJobInfo
-   * for reading data from a table.
-   * @param databaseName the db name
-   * @param tableName the table name
-   * @param filter the partition filter
-   */
+    /**
+     * Initializes a new InputJobInfo
+     * for reading data from a table.
+     * @param databaseName the db name
+     * @param tableName the table name
+     * @param filter the partition filter
+     */
 
-  public static InputJobInfo create(String databaseName,
-      String tableName,
-      String filter) {
-    return new InputJobInfo(databaseName, tableName, filter);
-  }
+    public static InputJobInfo create(String databaseName,
+                                      String tableName,
+                                      String filter) {
+        return new InputJobInfo(databaseName, tableName, filter);
+    }
 
-  
-  private InputJobInfo(String databaseName,
-                       String tableName,
-                       String filter) {
-    this.databaseName = (databaseName == null) ? 
-                        MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
-    this.tableName = tableName;
-    this.filter = filter;
-    this.properties = new Properties();
-  }
 
-  /**
-   * Gets the value of databaseName
-   * @return the databaseName
-   */
-  public String getDatabaseName() {
-    return databaseName;
-  }
+    private InputJobInfo(String databaseName,
+                         String tableName,
+                         String filter) {
+        this.databaseName = (databaseName == null) ?
+            MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
+        this.tableName = tableName;
+        this.filter = filter;
+        this.properties = new Properties();
+    }
 
-  /**
-   * Gets the value of tableName
-   * @return the tableName
-   */
-  public String getTableName() {
-    return tableName;
-  }
+    /**
+     * Gets the value of databaseName
+     * @return the databaseName
+     */
+    public String getDatabaseName() {
+        return databaseName;
+    }
 
-  /**
-   * Gets the table's meta information
-   * @return the HCatTableInfo
-   */
-  public HCatTableInfo getTableInfo() {
-    return tableInfo;
-  }
+    /**
+     * Gets the value of tableName
+     * @return the tableName
+     */
+    public String getTableName() {
+        return tableName;
+    }
 
-  /**
-   * set the tablInfo instance
-   * this should be the same instance
-   * determined by this object's DatabaseName and TableName
-   * @param tableInfo
-   */
-  void setTableInfo(HCatTableInfo tableInfo) {
-    this.tableInfo = tableInfo;
-  }
+    /**
+     * Gets the table's meta information
+     * @return the HCatTableInfo
+     */
+    public HCatTableInfo getTableInfo() {
+        return tableInfo;
+    }
 
-  /**
-   * Gets the value of partition filter
-   * @return the filter string
-   */
-  public String getFilter() {
-    return filter;
-  }
+    /**
+     * Set the tableInfo instance.
+     * This should be the same instance
+     * determined by this object's databaseName and tableName.
+     * @param tableInfo the table information
+     */
+    void setTableInfo(HCatTableInfo tableInfo) {
+        this.tableInfo = tableInfo;
+    }
 
-  /**
-   * @return partition info
-   */
-  public List<PartInfo> getPartitions() {
-    return partitions;
-  }
+    /**
+     * Gets the value of partition filter
+     * @return the filter string
+     */
+    public String getFilter() {
+        return filter;
+    }
 
-  /**
-   * @return partition info  list
-   */
-  void setPartitions(List<PartInfo> partitions) {
-    this.partitions = partitions;
-  }
+    /**
+     * @return partition info
+     */
+    public List<PartInfo> getPartitions() {
+        return partitions;
+    }
 
-  /**
-   * Set/Get Property information to be passed down to *StorageHandler implementation
-   * put implementation specific storage handler configurations here
-   * @return the implementation specific job properties 
-   */
-  public Properties getProperties() {
-    return properties;
-  }
+    /**
+     * @param partitions the list of partitions matching the filter
+     */
+    void setPartitions(List<PartInfo> partitions) {
+        this.partitions = partitions;
+    }
+
+    /**
+     * Property information to be passed down to the *StorageHandler implementation.
+     * Put implementation-specific storage handler configurations here.
+     * @return the implementation specific job properties
+     */
+    public Properties getProperties() {
+        return properties;
+    }
 }
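
InputJobInfo itself is a small serializable value object: create() defaults a null database name to the Hive default database, and the Properties bag carries implementation-specific settings down to the storage handler. A tiny construction sketch (table name, filter, and property key are invented; no metastore access happens at this point):

    import org.apache.hcatalog.mapreduce.InputJobInfo;

    // Minimal construction sketch for the value object defined above.
    public class InputJobInfoSketch {
        public static void main(String[] args) {
            InputJobInfo info = InputJobInfo.create("default", "page_views", "ds='2012-09-10'");
            info.getProperties().setProperty("example.handler.option", "true");
            System.out.println(info.getDatabaseName() + "." + info.getTableName()
                + " filter=" + info.getFilter());
        }
    }
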
diff --git a/src/java/org/apache/hcatalog/mapreduce/InternalUtil.java b/src/java/org/apache/hcatalog/mapreduce/InternalUtil.java
index 10a0ee9..8064623 100644
--- a/src/java/org/apache/hcatalog/mapreduce/InternalUtil.java
+++ b/src/java/org/apache/hcatalog/mapreduce/InternalUtil.java
@@ -57,132 +57,132 @@
 
     static StorerInfo extractStorerInfo(StorageDescriptor sd, Map<String, String> properties) throws IOException {
         Properties hcatProperties = new Properties();
-        for (String key : properties.keySet()){
+        for (String key : properties.keySet()) {
             hcatProperties.put(key, properties.get(key));
         }
 
         // also populate with StorageDescriptor->SerDe.Parameters
-        for (Map.Entry<String, String>param :
+        for (Map.Entry<String, String> param :
             sd.getSerdeInfo().getParameters().entrySet()) {
-          hcatProperties.put(param.getKey(), param.getValue());
+            hcatProperties.put(param.getKey(), param.getValue());
         }
 
 
         return new StorerInfo(
-                sd.getInputFormat(), sd.getOutputFormat(), sd.getSerdeInfo().getSerializationLib(),
-                properties.get(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE),
-                hcatProperties);
+            sd.getInputFormat(), sd.getOutputFormat(), sd.getSerdeInfo().getSerializationLib(),
+            properties.get(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE),
+            hcatProperties);
     }
 
-  static StructObjectInspector createStructObjectInspector(HCatSchema outputSchema) throws IOException {
+    static StructObjectInspector createStructObjectInspector(HCatSchema outputSchema) throws IOException {
 
-    if(outputSchema == null ) {
-      throw new IOException("Invalid output schema specified");
+        if (outputSchema == null) {
+            throw new IOException("Invalid output schema specified");
+        }
+
+        List<ObjectInspector> fieldInspectors = new ArrayList<ObjectInspector>();
+        List<String> fieldNames = new ArrayList<String>();
+
+        for (HCatFieldSchema hcatFieldSchema : outputSchema.getFields()) {
+            TypeInfo type = TypeInfoUtils.getTypeInfoFromTypeString(hcatFieldSchema.getTypeString());
+
+            fieldNames.add(hcatFieldSchema.getName());
+            fieldInspectors.add(getObjectInspector(type));
+        }
+
+        StructObjectInspector structInspector = ObjectInspectorFactory.
+            getStandardStructObjectInspector(fieldNames, fieldInspectors);
+        return structInspector;
     }
 
-    List<ObjectInspector> fieldInspectors = new ArrayList<ObjectInspector>();
-    List<String> fieldNames = new ArrayList<String>();
+    private static ObjectInspector getObjectInspector(TypeInfo type) throws IOException {
 
-    for(HCatFieldSchema hcatFieldSchema : outputSchema.getFields()) {
-      TypeInfo type = TypeInfoUtils.getTypeInfoFromTypeString(hcatFieldSchema.getTypeString());
+        switch (type.getCategory()) {
 
-      fieldNames.add(hcatFieldSchema.getName());
-      fieldInspectors.add(getObjectInspector(type));
+        case PRIMITIVE:
+            PrimitiveTypeInfo primitiveType = (PrimitiveTypeInfo) type;
+            return PrimitiveObjectInspectorFactory.
+                getPrimitiveJavaObjectInspector(primitiveType.getPrimitiveCategory());
+
+        case MAP:
+            MapTypeInfo mapType = (MapTypeInfo) type;
+            MapObjectInspector mapInspector = ObjectInspectorFactory.getStandardMapObjectInspector(
+                getObjectInspector(mapType.getMapKeyTypeInfo()), getObjectInspector(mapType.getMapValueTypeInfo()));
+            return mapInspector;
+
+        case LIST:
+            ListTypeInfo listType = (ListTypeInfo) type;
+            ListObjectInspector listInspector = ObjectInspectorFactory.getStandardListObjectInspector(
+                getObjectInspector(listType.getListElementTypeInfo()));
+            return listInspector;
+
+        case STRUCT:
+            StructTypeInfo structType = (StructTypeInfo) type;
+            List<TypeInfo> fieldTypes = structType.getAllStructFieldTypeInfos();
+
+            List<ObjectInspector> fieldInspectors = new ArrayList<ObjectInspector>();
+            for (TypeInfo fieldType : fieldTypes) {
+                fieldInspectors.add(getObjectInspector(fieldType));
+            }
+
+            StructObjectInspector structInspector = ObjectInspectorFactory.getStandardStructObjectInspector(
+                structType.getAllStructFieldNames(), fieldInspectors);
+            return structInspector;
+
+        default:
+            throw new IOException("Unknown field schema type");
+        }
     }
 
-    StructObjectInspector structInspector = ObjectInspectorFactory.
-        getStandardStructObjectInspector(fieldNames, fieldInspectors);
-    return structInspector;
-  }
-
-  private static ObjectInspector getObjectInspector(TypeInfo type) throws IOException {
-
-    switch(type.getCategory()) {
-
-    case PRIMITIVE :
-      PrimitiveTypeInfo primitiveType = (PrimitiveTypeInfo) type;
-      return PrimitiveObjectInspectorFactory.
-        getPrimitiveJavaObjectInspector(primitiveType.getPrimitiveCategory());
-
-    case MAP :
-      MapTypeInfo mapType = (MapTypeInfo) type;
-      MapObjectInspector mapInspector = ObjectInspectorFactory.getStandardMapObjectInspector(
-          getObjectInspector(mapType.getMapKeyTypeInfo()), getObjectInspector(mapType.getMapValueTypeInfo()));
-      return mapInspector;
-
-    case LIST :
-      ListTypeInfo listType = (ListTypeInfo) type;
-      ListObjectInspector listInspector = ObjectInspectorFactory.getStandardListObjectInspector(
-          getObjectInspector(listType.getListElementTypeInfo()));
-      return listInspector;
-
-    case STRUCT :
-      StructTypeInfo structType = (StructTypeInfo) type;
-      List<TypeInfo> fieldTypes = structType.getAllStructFieldTypeInfos();
-
-      List<ObjectInspector> fieldInspectors = new ArrayList<ObjectInspector>();
-      for(TypeInfo fieldType : fieldTypes) {
-        fieldInspectors.add(getObjectInspector(fieldType));
-      }
-
-      StructObjectInspector structInspector = ObjectInspectorFactory.getStandardStructObjectInspector(
-          structType.getAllStructFieldNames(), fieldInspectors);
-      return structInspector;
-
-    default :
-      throw new IOException("Unknown field schema type");
+    //TODO this has to find a better home; it's also hardcoded as default in Hive.
+    // It would be nice if the default were decided by the serde.
+    static void initializeOutputSerDe(SerDe serDe, Configuration conf, OutputJobInfo jobInfo)
+        throws SerDeException {
+        serDe.initialize(conf, getSerdeProperties(jobInfo.getTableInfo(), jobInfo.getOutputSchema()));
     }
-  }
 
-  //TODO this has to find a better home, it's also hardcoded as default in hive would be nice
-  // if the default was decided by the serde
-  static void initializeOutputSerDe(SerDe serDe, Configuration conf, OutputJobInfo jobInfo)
-      throws SerDeException {
-    serDe.initialize(conf, getSerdeProperties(jobInfo.getTableInfo(), jobInfo.getOutputSchema()));
-  }
-
-  static void initializeDeserializer(Deserializer deserializer, Configuration conf,
-      HCatTableInfo info, HCatSchema schema) throws SerDeException {
-    Properties props = getSerdeProperties(info, schema);
-    LOG.info("Initializing " + deserializer.getClass().getName() + " with properties " + props);
-    deserializer.initialize(conf, props);
-  }
-
-  private static Properties getSerdeProperties(HCatTableInfo info, HCatSchema s)
-      throws SerDeException {
-    Properties props = new Properties();
-    List<FieldSchema> fields = HCatUtil.getFieldSchemaList(s.getFields());
-    props.setProperty(org.apache.hadoop.hive.serde.Constants.LIST_COLUMNS,
-        MetaStoreUtils.getColumnNamesFromFieldSchema(fields));
-    props.setProperty(org.apache.hadoop.hive.serde.Constants.LIST_COLUMN_TYPES,
-        MetaStoreUtils.getColumnTypesFromFieldSchema(fields));
-
-    // setting these props to match LazySimpleSerde
-    props.setProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_NULL_FORMAT, "\\N");
-    props.setProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
-
-    //add props from params set in table schema
-    props.putAll(info.getStorerInfo().getProperties());
-
-    return props;
-  }
-
-static Reporter createReporter(TaskAttemptContext context) {
-      return new ProgressReporter(context);
-  }
-
-  /**
-   * Casts an InputSplit into a HCatSplit, providing a useful error message if the cast fails.
-   * @param split the InputSplit
-   * @return the HCatSplit
-   * @throws IOException
-   */
-  public static HCatSplit castToHCatSplit(InputSplit split) throws IOException {
-    if (split instanceof HCatSplit) {
-      return (HCatSplit) split;
-    } else {
-      throw new IOException("Split must be " + HCatSplit.class.getName()
-          + " but found " + split.getClass().getName());
+    static void initializeDeserializer(Deserializer deserializer, Configuration conf,
+                                       HCatTableInfo info, HCatSchema schema) throws SerDeException {
+        Properties props = getSerdeProperties(info, schema);
+        LOG.info("Initializing " + deserializer.getClass().getName() + " with properties " + props);
+        deserializer.initialize(conf, props);
     }
-  }
+
+    private static Properties getSerdeProperties(HCatTableInfo info, HCatSchema s)
+        throws SerDeException {
+        Properties props = new Properties();
+        List<FieldSchema> fields = HCatUtil.getFieldSchemaList(s.getFields());
+        props.setProperty(org.apache.hadoop.hive.serde.Constants.LIST_COLUMNS,
+            MetaStoreUtils.getColumnNamesFromFieldSchema(fields));
+        props.setProperty(org.apache.hadoop.hive.serde.Constants.LIST_COLUMN_TYPES,
+            MetaStoreUtils.getColumnTypesFromFieldSchema(fields));
+
+        // setting these props to match LazySimpleSerde
+        props.setProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_NULL_FORMAT, "\\N");
+        props.setProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+
+        //add props from params set in table schema
+        props.putAll(info.getStorerInfo().getProperties());
+
+        return props;
+    }
+
+    static Reporter createReporter(TaskAttemptContext context) {
+        return new ProgressReporter(context);
+    }
+
+    /**
+     * Casts an InputSplit into a HCatSplit, providing a useful error message if the cast fails.
+     * @param split the InputSplit
+     * @return the HCatSplit
+     * @throws IOException
+     */
+    public static HCatSplit castToHCatSplit(InputSplit split) throws IOException {
+        if (split instanceof HCatSplit) {
+            return (HCatSplit) split;
+        } else {
+            throw new IOException("Split must be " + HCatSplit.class.getName()
+                + " but found " + split.getClass().getName());
+        }
+    }
 }
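
getObjectInspector above recurses over a Hive TypeInfo tree, handling the PRIMITIVE, MAP, LIST, and STRUCT categories and failing on anything else. The TypeInfo it consumes is parsed from the type strings carried in the HCatalog schema; a small sketch of that parsing step (the type string is only an example, and the serde2 packages are the standard Hive ones):

    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    // Parse an example Hive type string into the TypeInfo tree that
    // createStructObjectInspector/getObjectInspector walk recursively.
    public class TypeInfoSketch {
        public static void main(String[] args) {
            TypeInfo type = TypeInfoUtils.getTypeInfoFromTypeString(
                "struct<name:string,ids:array<int>,props:map<string,string>>");
            System.out.println(type.getCategory() + " -> " + type.getTypeName());
        }
    }
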
diff --git a/src/java/org/apache/hcatalog/mapreduce/MultiOutputFormat.java b/src/java/org/apache/hcatalog/mapreduce/MultiOutputFormat.java
index 134c052..284c46a 100644
--- a/src/java/org/apache/hcatalog/mapreduce/MultiOutputFormat.java
+++ b/src/java/org/apache/hcatalog/mapreduce/MultiOutputFormat.java
@@ -207,7 +207,7 @@
      * @throws InterruptedException
      */
     public static <K, V> void write(String alias, K key, V value, TaskInputOutputContext context)
-            throws IOException, InterruptedException {
+        throws IOException, InterruptedException {
         KeyValue<K, V> keyval = new KeyValue<K, V>(key, value);
         context.write(new Text(alias), keyval);
     }
@@ -227,14 +227,14 @@
 
     @Override
     public RecordWriter<Writable, Writable> getRecordWriter(TaskAttemptContext context)
-            throws IOException,
-            InterruptedException {
+        throws IOException,
+        InterruptedException {
         return new MultiRecordWriter(context);
     }
 
     @Override
     public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException,
-            InterruptedException {
+        InterruptedException {
         return new MultiOutputCommitter(context);
     }
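
MultiOutputFormat.write(alias, key, value, context) routes a record to a named output; the hunk above only adjusts the indentation of its signature and of getRecordWriter/getOutputCommitter. A rough mapper-side usage sketch (alias names and the routing rule are invented, and the aliases are assumed to have been registered with MultiOutputFormat when the job was configured):

    import java.io.IOException;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hcatalog.mapreduce.MultiOutputFormat;

    // Routes each input line to one of two hypothetical named outputs.
    public class RoutingMapperSketch extends Mapper<LongWritable, Text, Writable, Writable> {
        @Override
        protected void map(LongWritable offset, Text line, Context context)
            throws IOException, InterruptedException {
            String alias = line.toString().startsWith("ERROR") ? "errors" : "events";
            MultiOutputFormat.write(alias, offset, line, context);
        }
    }
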
 
diff --git a/src/java/org/apache/hcatalog/mapreduce/OutputFormatContainer.java b/src/java/org/apache/hcatalog/mapreduce/OutputFormatContainer.java
index db60253..50aeb35 100644
--- a/src/java/org/apache/hcatalog/mapreduce/OutputFormatContainer.java
+++ b/src/java/org/apache/hcatalog/mapreduce/OutputFormatContainer.java
@@ -38,7 +38,7 @@
     /**
      * @param of OutputFormat this instance will contain
      */
-    public OutputFormatContainer(org.apache.hadoop.mapred.OutputFormat<? super WritableComparable<?>,? super Writable> of) {
+    public OutputFormatContainer(org.apache.hadoop.mapred.OutputFormat<? super WritableComparable<?>, ? super Writable> of) {
         this.of = of;
     }
 
diff --git a/src/java/org/apache/hcatalog/mapreduce/OutputJobInfo.java b/src/java/org/apache/hcatalog/mapreduce/OutputJobInfo.java
index 0d77e25..f00cea5 100644
--- a/src/java/org/apache/hcatalog/mapreduce/OutputJobInfo.java
+++ b/src/java/org/apache/hcatalog/mapreduce/OutputJobInfo.java
@@ -31,238 +31,239 @@
 /** The class used to serialize and store the output related information  */
 public class OutputJobInfo implements Serializable {
 
-  /** The db and table names. */
-  private final String databaseName;
-  private final String tableName;
+    /** The db and table names. */
+    private final String databaseName;
+    private final String tableName;
 
-  /** The serialization version. */
-  private static final long serialVersionUID = 1L;
+    /** The serialization version. */
+    private static final long serialVersionUID = 1L;
 
-  /** The table info provided by user. */
-  private HCatTableInfo tableInfo;
+    /** The table info provided by user. */
+    private HCatTableInfo tableInfo;
 
-  /** The output schema. This is given to us by user.  This wont contain any
-   * partition columns ,even if user has specified them.
-   * */
-  private HCatSchema outputSchema;
+    /** The output schema. This is given to us by the user. This won't contain any
+     * partition columns, even if the user has specified them.
+     */
+    private HCatSchema outputSchema;
 
-  /** The location of the partition being written */
-  private String location;
+    /** The location of the partition being written */
+    private String location;
 
-  /** The partition values to publish to, if used for output*/
-  private Map<String, String> partitionValues;
+    /** The partition values to publish to, if used for output*/
+    private Map<String, String> partitionValues;
 
-  private List<Integer> posOfPartCols;
-  private List<Integer> posOfDynPartCols;
+    private List<Integer> posOfPartCols;
+    private List<Integer> posOfDynPartCols;
 
-  private Properties properties;
+    private Properties properties;
 
-  private int maxDynamicPartitions;
+    private int maxDynamicPartitions;
 
-  /** List of keys for which values were not specified at write setup time, to be infered at write time */
-  private List<String> dynamicPartitioningKeys;
+    /** List of keys for which values were not specified at write setup time, to be inferred at write time */
+    private List<String> dynamicPartitioningKeys;
 
-  private boolean harRequested;
+    private boolean harRequested;
 
-  /**
-   * Initializes a new OutputJobInfo instance
-   * for writing data from a table.
-   * @param databaseName the db name
-   * @param tableName the table name
-   * @param partitionValues The partition values to publish to, can be null or empty Map to
-   * work with hadoop security, the kerberos principal name of the server - else null
-   * The principal name should be of the form:
-   * <servicename>/_HOST@<realm> like "hcat/_HOST@myrealm.com"
-   * The special string _HOST will be replaced automatically with the correct host name
-   * indicate write to a unpartitioned table. For partitioned tables, this map should
-   * contain keys for all partition columns with corresponding values.
-   */
-  public static OutputJobInfo create(String databaseName,
-                                     String tableName,
-                                     Map<String, String> partitionValues) {
-    return new OutputJobInfo(databaseName,
-        tableName,
-        partitionValues);
-  }
+    /**
+     * Initializes a new OutputJobInfo instance
+     * for writing data to a table.
+     * @param databaseName the db name
+     * @param tableName the table name
+     * @param partitionValues The partition values to publish to; can be null or an empty Map
+     * to indicate a write to an unpartitioned table. For partitioned tables, this map should
+     * contain keys for all partition columns with corresponding values.
+     */
+    public static OutputJobInfo create(String databaseName,
+                                       String tableName,
+                                       Map<String, String> partitionValues) {
+        return new OutputJobInfo(databaseName,
+            tableName,
+            partitionValues);
+    }
 
-  private OutputJobInfo(String databaseName,
-                        String tableName,
-                        Map<String, String> partitionValues) {
-    this.databaseName =  (databaseName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
-    this.tableName = tableName;
-    this.partitionValues = partitionValues;
-    this.properties = new Properties();
-  }
+    private OutputJobInfo(String databaseName,
+                          String tableName,
+                          Map<String, String> partitionValues) {
+        this.databaseName = (databaseName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
+        this.tableName = tableName;
+        this.partitionValues = partitionValues;
+        this.properties = new Properties();
+    }
 
-  /**
-   * @return the posOfPartCols
-   */
-  protected List<Integer> getPosOfPartCols() {
-    return posOfPartCols;
-  }
+    /**
+     * @return the posOfPartCols
+     */
+    protected List<Integer> getPosOfPartCols() {
+        return posOfPartCols;
+    }
 
-  /**
-   * @return the posOfDynPartCols
-   */
-  protected List<Integer> getPosOfDynPartCols() {
-    return posOfDynPartCols;
-  }
+    /**
+     * @return the posOfDynPartCols
+     */
+    protected List<Integer> getPosOfDynPartCols() {
+        return posOfDynPartCols;
+    }
 
-  /**
-   * @param posOfPartCols the posOfPartCols to set
-   */
-  protected void setPosOfPartCols(List<Integer> posOfPartCols) {
-    // sorting the list in the descending order so that deletes happen back-to-front
-    Collections.sort(posOfPartCols, new Comparator<Integer> () {
-      @Override
-      public int compare(Integer earlier, Integer later) {
-        return (earlier > later) ? -1 : ((earlier == later) ? 0 : 1);
-      }
-    });
-    this.posOfPartCols = posOfPartCols;
-  }
+    /**
+     * @param posOfPartCols the posOfPartCols to set
+     */
+    protected void setPosOfPartCols(List<Integer> posOfPartCols) {
+        // sorting the list in the descending order so that deletes happen back-to-front
+        Collections.sort(posOfPartCols, new Comparator<Integer>() {
+            @Override
+            public int compare(Integer earlier, Integer later) {
+                return (earlier > later) ? -1 : ((earlier == later) ? 0 : 1);
+            }
+        });
+        this.posOfPartCols = posOfPartCols;
+    }
 
-  /**
+    /**
      * @param posOfDynPartCols the posOfDynPartCols to set
      */
     protected void setPosOfDynPartCols(List<Integer> posOfDynPartCols) {
-      // Important - no sorting here! We retain order, it's used to match with values at runtime
-      this.posOfDynPartCols = posOfDynPartCols;
+        // Important - no sorting here! We retain order, it's used to match with values at runtime
+        this.posOfDynPartCols = posOfDynPartCols;
     }
 
-  /**
-   * @return the tableInfo
-   */
-  public HCatTableInfo getTableInfo() {
-    return tableInfo;
-  }
+    /**
+     * @return the tableInfo
+     */
+    public HCatTableInfo getTableInfo() {
+        return tableInfo;
+    }
 
-  /**
-   * @return the outputSchema
-   */
-  public HCatSchema getOutputSchema() {
-    return outputSchema;
-  }
+    /**
+     * @return the outputSchema
+     */
+    public HCatSchema getOutputSchema() {
+        return outputSchema;
+    }
 
-  /**
-   * @param schema the outputSchema to set
-   */
-  public void setOutputSchema(HCatSchema schema) {
-    this.outputSchema = schema;
-  }
+    /**
+     * @param schema the outputSchema to set
+     */
+    public void setOutputSchema(HCatSchema schema) {
+        this.outputSchema = schema;
+    }
 
-  /**
-   * @return the location
-   */
-  public String getLocation() {
-    return location;
-  }
+    /**
+     * @return the location
+     */
+    public String getLocation() {
+        return location;
+    }
 
-  /**
-   * @param location location to write to
-   */
-  public void setLocation(String location) {
-    this.location = location;
-  }
-  /**
-   * Sets the value of partitionValues
-   * @param partitionValues the partition values to set
-   */
-  void setPartitionValues(Map<String, String>  partitionValues) {
-    this.partitionValues = partitionValues;
-  }
+    /**
+     * @param location location to write to
+     */
+    public void setLocation(String location) {
+        this.location = location;
+    }
 
-  /**
-   * Gets the value of partitionValues
-   * @return the partitionValues
-   */
-  public Map<String, String> getPartitionValues() {
-    return partitionValues;
-  }
+    /**
+     * Sets the value of partitionValues
+     * @param partitionValues the partition values to set
+     */
+    void setPartitionValues(Map<String, String> partitionValues) {
+        this.partitionValues = partitionValues;
+    }
 
-  /**
-   * set the tablInfo instance
-   * this should be the same instance
-   * determined by this object's DatabaseName and TableName
-   * @param tableInfo
-   */
-  void setTableInfo(HCatTableInfo tableInfo) {
-    this.tableInfo = tableInfo;
-  }
+    /**
+     * Gets the value of partitionValues
+     * @return the partitionValues
+     */
+    public Map<String, String> getPartitionValues() {
+        return partitionValues;
+    }
 
-  /**
-   * @return database name of table to write to
-   */
-  public String getDatabaseName() {
-    return databaseName;
-  }
+    /**
+     * Sets the tableInfo instance; this should be the same instance
+     * determined by this object's DatabaseName and TableName.
+     * @param tableInfo the table information to associate with this job
+     */
+    void setTableInfo(HCatTableInfo tableInfo) {
+        this.tableInfo = tableInfo;
+    }
 
-  /**
-   * @return name of table to write to
-   */
-  public String getTableName() {
-    return tableName;
-  }
+    /**
+     * @return database name of table to write to
+     */
+    public String getDatabaseName() {
+        return databaseName;
+    }
 
-  /**
-   * Set/Get Property information to be passed down to *StorageHandler implementation
-   * put implementation specific storage handler configurations here
-   * @return the implementation specific job properties 
-   */
-  public Properties getProperties() {
-    return properties;
-  }
+    /**
+     * @return name of table to write to
+     */
+    public String getTableName() {
+        return tableName;
+    }
 
-  /**
-   * Set maximum number of allowable dynamic partitions
-   * @param maxDynamicPartitions
-   */
-  public void setMaximumDynamicPartitions(int maxDynamicPartitions){
-    this.maxDynamicPartitions = maxDynamicPartitions;
-  }
+    /**
+     * Gets the property information to be passed down to the *StorageHandler implementation;
+     * put implementation-specific storage handler configurations here.
+     * @return the implementation-specific job properties
+     */
+    public Properties getProperties() {
+        return properties;
+    }
 
-  /**
-   * Returns maximum number of allowable dynamic partitions
-   * @return maximum number of allowable dynamic partitions
-   */
-  public int getMaxDynamicPartitions() {
-    return this.maxDynamicPartitions;
-  }
+    /**
+     * Set maximum number of allowable dynamic partitions
+     * @param maxDynamicPartitions the maximum number of dynamic partitions allowed for this job
+     */
+    public void setMaximumDynamicPartitions(int maxDynamicPartitions) {
+        this.maxDynamicPartitions = maxDynamicPartitions;
+    }
 
-  /**
-   * Sets whether or not hadoop archiving has been requested for this job
-   * @param harRequested
-   */
-  public void setHarRequested(boolean harRequested){
-    this.harRequested = harRequested;
-  }
+    /**
+     * Returns maximum number of allowable dynamic partitions
+     * @return maximum number of allowable dynamic partitions
+     */
+    public int getMaxDynamicPartitions() {
+        return this.maxDynamicPartitions;
+    }
 
-  /**
-   * Returns whether or not hadoop archiving has been requested for this job
-   * @return whether or not hadoop archiving has been requested for this job
-   */
-  public boolean getHarRequested() {
-    return this.harRequested;
-  }
+    /**
+     * Sets whether or not hadoop archiving has been requested for this job
+     * @param harRequested whether or not hadoop archiving has been requested for this job
+     */
+    public void setHarRequested(boolean harRequested) {
+        this.harRequested = harRequested;
+    }
 
-  /**
-   * Returns whether or not Dynamic Partitioning is used
-   * @return whether or not dynamic partitioning is currently enabled and used
-   */
-  public boolean isDynamicPartitioningUsed() {
-    return !((dynamicPartitioningKeys == null) || (dynamicPartitioningKeys.isEmpty()));
-  }
+    /**
+     * Returns whether or not hadoop archiving has been requested for this job
+     * @return whether or not hadoop archiving has been requested for this job
+     */
+    public boolean getHarRequested() {
+        return this.harRequested;
+    }
 
-  /**
-   * Sets the list of dynamic partitioning keys used for outputting without specifying all the keys
-   * @param dynamicPartitioningKeys
-   */
-  public void setDynamicPartitioningKeys(List<String> dynamicPartitioningKeys) {
-    this.dynamicPartitioningKeys = dynamicPartitioningKeys;
-  }
+    /**
+     * Returns whether or not Dynamic Partitioning is used
+     * @return whether or not dynamic partitioning is currently enabled and used
+     */
+    public boolean isDynamicPartitioningUsed() {
+        return !((dynamicPartitioningKeys == null) || (dynamicPartitioningKeys.isEmpty()));
+    }
 
-  public List<String> getDynamicPartitioningKeys(){
-    return this.dynamicPartitioningKeys;
-  }
+    /**
+     * Sets the list of dynamic partitioning keys used for outputting without specifying all the keys
+     * @param dynamicPartitioningKeys the list of dynamic partitioning keys
+     */
+    public void setDynamicPartitioningKeys(List<String> dynamicPartitioningKeys) {
+        this.dynamicPartitioningKeys = dynamicPartitioningKeys;
+    }
+
+    public List<String> getDynamicPartitioningKeys() {
+        return this.dynamicPartitioningKeys;
+    }
 
 }
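
Note: the reindented setPosOfPartCols() above sorts the static partition-column positions in descending order so that removals by index happen back-to-front and do not shift the positions still waiting to be removed. A minimal, self-contained sketch of that pattern (the list contents here are hypothetical and unrelated to HCatalog's record types):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class DescendingRemovalSketch {
    public static void main(String[] args) {
        // a "record" with five fields; positions 1 and 3 are the (static) partition columns
        List<String> record = new ArrayList<String>(Arrays.asList("a", "p1", "b", "p2", "c"));
        List<Integer> posOfPartCols = new ArrayList<Integer>(Arrays.asList(1, 3));

        // sort descending, as setPosOfPartCols() does, so removing index 3 first
        // leaves index 1 still pointing at the element we meant to drop
        Collections.sort(posOfPartCols, Collections.reverseOrder());
        for (int pos : posOfPartCols) {
            record.remove(pos);      // List.remove(int) - removes by position
        }
        System.out.println(record);  // prints [a, b, c]
    }
}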
diff --git a/src/java/org/apache/hcatalog/mapreduce/PartInfo.java b/src/java/org/apache/hcatalog/mapreduce/PartInfo.java
index 7fe0a9d..63bcb43 100644
--- a/src/java/org/apache/hcatalog/mapreduce/PartInfo.java
+++ b/src/java/org/apache/hcatalog/mapreduce/PartInfo.java
@@ -26,138 +26,138 @@
 /** The Class used to serialize the partition information read from the metadata server that maps to a partition. */
 public class PartInfo implements Serializable {
 
-  /** The serialization version */
-  private static final long serialVersionUID = 1L;
+    /** The serialization version */
+    private static final long serialVersionUID = 1L;
 
-  /** The partition schema. */
-  private final HCatSchema partitionSchema;
+    /** The partition schema. */
+    private final HCatSchema partitionSchema;
 
-  /** The information about which input storage handler to use */
-  private final String storageHandlerClassName;
-  private final String inputFormatClassName;
-  private final String outputFormatClassName;
-  private final String serdeClassName;
+    /** The information about which input storage handler to use */
+    private final String storageHandlerClassName;
+    private final String inputFormatClassName;
+    private final String outputFormatClassName;
+    private final String serdeClassName;
 
-  /** HCat-specific properties set at the partition */
-  private final Properties hcatProperties;
+    /** HCat-specific properties set at the partition */
+    private final Properties hcatProperties;
 
-  /** The data location. */
-  private final String location;
+    /** The data location. */
+    private final String location;
 
-  /** The map of partition key names and their values. */
-  private Map<String,String> partitionValues;
+    /** The map of partition key names and their values. */
+    private Map<String, String> partitionValues;
 
-  /** Job properties associated with this parition */
-  Map<String,String> jobProperties;
+    /** Job properties associated with this partition */
+    Map<String, String> jobProperties;
 
-  /** the table info associated with this partition */
-  HCatTableInfo tableInfo;
+    /** the table info associated with this partition */
+    HCatTableInfo tableInfo;
 
-  /**
-   * Instantiates a new hcat partition info.
-   * @param partitionSchema the partition schema
-   * @param storageHandler the storage handler
-   * @param location the location
-   * @param hcatProperties hcat-specific properties at the partition
-   * @param jobProperties the job properties
-   * @param tableInfo the table information
-   */
-  public PartInfo(HCatSchema partitionSchema, HCatStorageHandler storageHandler,
-                  String location, Properties hcatProperties, 
-                  Map<String,String> jobProperties, HCatTableInfo tableInfo){
-    this.partitionSchema = partitionSchema;
-    this.location = location;
-    this.hcatProperties = hcatProperties;
-    this.jobProperties = jobProperties;
-    this.tableInfo = tableInfo;
+    /**
+     * Instantiates a new hcat partition info.
+     * @param partitionSchema the partition schema
+     * @param storageHandler the storage handler
+     * @param location the location
+     * @param hcatProperties hcat-specific properties at the partition
+     * @param jobProperties the job properties
+     * @param tableInfo the table information
+     */
+    public PartInfo(HCatSchema partitionSchema, HCatStorageHandler storageHandler,
+                    String location, Properties hcatProperties,
+                    Map<String, String> jobProperties, HCatTableInfo tableInfo) {
+        this.partitionSchema = partitionSchema;
+        this.location = location;
+        this.hcatProperties = hcatProperties;
+        this.jobProperties = jobProperties;
+        this.tableInfo = tableInfo;
 
-    this.storageHandlerClassName = storageHandler.getClass().getName();
-    this.inputFormatClassName = storageHandler.getInputFormatClass().getName();
-    this.serdeClassName = storageHandler.getSerDeClass().getName();
-    this.outputFormatClassName = storageHandler.getOutputFormatClass().getName();
-}
+        this.storageHandlerClassName = storageHandler.getClass().getName();
+        this.inputFormatClassName = storageHandler.getInputFormatClass().getName();
+        this.serdeClassName = storageHandler.getSerDeClass().getName();
+        this.outputFormatClassName = storageHandler.getOutputFormatClass().getName();
+    }
 
-  /**
-   * Gets the value of partitionSchema.
-   * @return the partitionSchema
-   */
-  public HCatSchema getPartitionSchema() {
-    return partitionSchema;
-  }
+    /**
+     * Gets the value of partitionSchema.
+     * @return the partitionSchema
+     */
+    public HCatSchema getPartitionSchema() {
+        return partitionSchema;
+    }
 
-  /**
-   * @return the storage handler class name
-   */
-  public String getStorageHandlerClassName() {
-    return storageHandlerClassName;
-  }
+    /**
+     * @return the storage handler class name
+     */
+    public String getStorageHandlerClassName() {
+        return storageHandlerClassName;
+    }
 
-  /**
-   * @return the inputFormatClassName
-   */
-  public String getInputFormatClassName() {
-    return inputFormatClassName;
-  }
+    /**
+     * @return the inputFormatClassName
+     */
+    public String getInputFormatClassName() {
+        return inputFormatClassName;
+    }
 
-  /**
-   * @return the outputFormatClassName
-   */
-  public String getOutputFormatClassName() {
-    return outputFormatClassName;
-  }
+    /**
+     * @return the outputFormatClassName
+     */
+    public String getOutputFormatClassName() {
+        return outputFormatClassName;
+    }
 
-  /**
-   * @return the serdeClassName
-   */
-  public String getSerdeClassName() {
-    return serdeClassName;
-  }
+    /**
+     * @return the serdeClassName
+     */
+    public String getSerdeClassName() {
+        return serdeClassName;
+    }
 
-  /**
-   * Gets the input storage handler properties.
-   * @return HCat-specific properties set at the partition 
-   */
-  public Properties getInputStorageHandlerProperties() {
-    return hcatProperties;
-  }
+    /**
+     * Gets the input storage handler properties.
+     * @return HCat-specific properties set at the partition
+     */
+    public Properties getInputStorageHandlerProperties() {
+        return hcatProperties;
+    }
 
-  /**
-   * Gets the value of location.
-   * @return the location
-   */
-  public String getLocation() {
-    return location;
-  }
+    /**
+     * Gets the value of location.
+     * @return the location
+     */
+    public String getLocation() {
+        return location;
+    }
 
-  /**
-   * Sets the partition values.
-   * @param partitionValues the new partition values
-   */
-  public void setPartitionValues(Map<String,String> partitionValues) {
-    this.partitionValues = partitionValues;
-  }
+    /**
+     * Sets the partition values.
+     * @param partitionValues the new partition values
+     */
+    public void setPartitionValues(Map<String, String> partitionValues) {
+        this.partitionValues = partitionValues;
+    }
 
-  /**
-   * Gets the partition values.
-   * @return the partition values
-   */
-  public Map<String,String> getPartitionValues() {
-    return partitionValues;
-  }
+    /**
+     * Gets the partition values.
+     * @return the partition values
+     */
+    public Map<String, String> getPartitionValues() {
+        return partitionValues;
+    }
 
-  /**
-   * Gets the job properties.
-   * @return a map of the job properties
-   */
-  public Map<String,String> getJobProperties() {
-    return jobProperties;
-  }
+    /**
+     * Gets the job properties.
+     * @return a map of the job properties
+     */
+    public Map<String, String> getJobProperties() {
+        return jobProperties;
+    }
 
-  /**
-   * Gets the HCatalog table information.
-   * @return the table information
-   */
-  public HCatTableInfo getTableInfo() {
-    return tableInfo;
-  }
+    /**
+     * Gets the HCatalog table information.
+     * @return the table information
+     */
+    public HCatTableInfo getTableInfo() {
+        return tableInfo;
+    }
 }
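
Note: PartInfo is Serializable but records only the class names of the storage handler, input/output format and SerDe taken from the HCatStorageHandler passed to its constructor, presumably so the partition description can be shipped with the job and the classes re-instantiated on the task side. A rough sketch of that store-the-name, rebuild-by-reflection idea, using hypothetical types in place of the HCatalog ones:

import java.io.Serializable;

/** Keep only a class name in a Serializable holder and rebuild the object later by reflection. */
public class ClassNameHolderSketch implements Serializable {
    private static final long serialVersionUID = 1L;

    /** the name survives Java serialization even when the original instance would not */
    private final String inputFormatClassName;

    public ClassNameHolderSketch(Object inputFormatInstance) {
        this.inputFormatClassName = inputFormatInstance.getClass().getName();
    }

    /** re-create an instance on the other side from the recorded name */
    public Object newInputFormatInstance() throws Exception {
        return Class.forName(inputFormatClassName).newInstance();
    }

    public static void main(String[] args) throws Exception {
        ClassNameHolderSketch holder = new ClassNameHolderSketch(new StringBuilder());
        System.out.println(holder.newInputFormatInstance().getClass());  // class java.lang.StringBuilder
    }
}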
diff --git a/src/java/org/apache/hcatalog/mapreduce/ProgressReporter.java b/src/java/org/apache/hcatalog/mapreduce/ProgressReporter.java
index 0ef359c..936204b 100644
--- a/src/java/org/apache/hcatalog/mapreduce/ProgressReporter.java
+++ b/src/java/org/apache/hcatalog/mapreduce/ProgressReporter.java
@@ -27,65 +27,65 @@
 
 class ProgressReporter extends StatusReporter implements Reporter {
 
-  private TaskInputOutputContext context = null;
-  private TaskAttemptContext taskAttemptContext = null;
+    private TaskInputOutputContext context = null;
+    private TaskAttemptContext taskAttemptContext = null;
 
-  public ProgressReporter(TaskAttemptContext context) {
-    if (context instanceof TaskInputOutputContext) {
-      this.context = (TaskInputOutputContext) context;
-    } else {
-      taskAttemptContext = context;
+    public ProgressReporter(TaskAttemptContext context) {
+        if (context instanceof TaskInputOutputContext) {
+            this.context = (TaskInputOutputContext) context;
+        } else {
+            taskAttemptContext = context;
+        }
     }
-  }
 
-  @Override
-  public void setStatus(String status) {
-    if (context != null) {
-      context.setStatus(status);
+    @Override
+    public void setStatus(String status) {
+        if (context != null) {
+            context.setStatus(status);
+        }
     }
-  }
 
-  @Override
-  public Counters.Counter getCounter(Enum<?> name) {
-    return (context != null) ? (Counters.Counter) context.getCounter(name) : null;
-  }
-
-  @Override
-  public Counters.Counter getCounter(String group, String name) {
-    return (context != null) ? (Counters.Counter) context.getCounter(group, name) : null;
-  }
-
-  @Override
-  public void incrCounter(Enum<?> key, long amount) {
-    if (context != null) {
-      context.getCounter(key).increment(amount);
+    @Override
+    public Counters.Counter getCounter(Enum<?> name) {
+        return (context != null) ? (Counters.Counter) context.getCounter(name) : null;
     }
-  }
 
-  @Override
-  public void incrCounter(String group, String counter, long amount) {
-    if (context != null) {
-      context.getCounter(group, counter).increment(amount);
+    @Override
+    public Counters.Counter getCounter(String group, String name) {
+        return (context != null) ? (Counters.Counter) context.getCounter(group, name) : null;
     }
-  }
 
-  @Override
-  public InputSplit getInputSplit() throws UnsupportedOperationException {
-    return null;
-  }
-
-  public float getProgress() {
-      /* Required to build against 0.23 Reporter and StatusReporter. */
-      /* TODO: determine the progress. */
-      return 0.0f;
-  }
-
-  @Override
-  public void progress() {
-    if (context != null) {
-      context.progress();
-    } else {
-      taskAttemptContext.progress();
+    @Override
+    public void incrCounter(Enum<?> key, long amount) {
+        if (context != null) {
+            context.getCounter(key).increment(amount);
+        }
     }
-  }
+
+    @Override
+    public void incrCounter(String group, String counter, long amount) {
+        if (context != null) {
+            context.getCounter(group, counter).increment(amount);
+        }
+    }
+
+    @Override
+    public InputSplit getInputSplit() throws UnsupportedOperationException {
+        return null;
+    }
+
+    public float getProgress() {
+        /* Required to build against 0.23 Reporter and StatusReporter. */
+        /* TODO: determine the progress. */
+        return 0.0f;
+    }
+
+    @Override
+    public void progress() {
+        if (context != null) {
+            context.progress();
+        } else {
+            taskAttemptContext.progress();
+        }
+    }
 }
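
Note: ProgressReporter adapts the new-API TaskAttemptContext to the old org.apache.hadoop.mapred.Reporter interface, and guards every call because only a TaskInputOutputContext can report status and counters. A stripped-down sketch of the same adapter pattern, with hypothetical interfaces standing in for the Hadoop types:

/** Old-style callback, analogous to a trimmed-down org.apache.hadoop.mapred.Reporter. */
interface LegacyReporter {
    void setStatus(String status);
    void progress();
}

/** New-style context, analogous to TaskAttemptContext. */
interface NewContext {
    void progress();
}

/** Richer context, analogous to TaskInputOutputContext: it can also report status. */
interface NewRichContext extends NewContext {
    void setStatus(String status);
}

public class ReporterAdapterSketch implements LegacyReporter {
    private NewRichContext richContext = null;   // set when the richer context is available
    private NewContext plainContext = null;      // fallback, like taskAttemptContext in ProgressReporter

    public ReporterAdapterSketch(NewContext context) {
        if (context instanceof NewRichContext) {
            this.richContext = (NewRichContext) context;
        } else {
            this.plainContext = context;
        }
    }

    @Override
    public void setStatus(String status) {
        if (richContext != null) {
            richContext.setStatus(status);   // silently dropped otherwise, mirroring ProgressReporter
        }
    }

    @Override
    public void progress() {
        if (richContext != null) {
            richContext.progress();
        } else {
            plainContext.progress();
        }
    }

    public static void main(String[] args) {
        NewContext plainOnly = new NewContext() {
            @Override
            public void progress() {
                System.out.println("progress reported");
            }
        };
        LegacyReporter reporter = new ReporterAdapterSketch(plainOnly);
        reporter.setStatus("ignored");   // no-op: the plain context cannot carry a status
        reporter.progress();             // delegates to the plain context
    }
}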
diff --git a/src/java/org/apache/hcatalog/mapreduce/Security.java b/src/java/org/apache/hcatalog/mapreduce/Security.java
index 2ff7e2f..041a898 100644
--- a/src/java/org/apache/hcatalog/mapreduce/Security.java
+++ b/src/java/org/apache/hcatalog/mapreduce/Security.java
@@ -44,134 +44,134 @@
 
 final class Security {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HCatOutputFormat.class);
-  
-  // making sure this is not initialized unless needed
-  private static final class LazyHolder {
-    public static final Security INSTANCE = new Security();
-  }
+    private static final Logger LOG = LoggerFactory.getLogger(HCatOutputFormat.class);
 
-  public static Security getInstance() {
-    return LazyHolder.INSTANCE;
-  }
-
-  boolean isSecurityEnabled() {
-      try {
-          Method m = UserGroupInformation.class.getMethod("isSecurityEnabled");
-          return (Boolean)m.invoke(null, (Object[])null);
-      } catch (NoSuchMethodException e) {
-          LOG.info("Security is not supported by this version of hadoop.", e);
-      } catch (InvocationTargetException e) {
-          String msg = "Failed to call isSecurityEnabled()";
-          LOG.info(msg, e);
-          throw new IllegalStateException(msg,e);
-      } catch (IllegalAccessException e) {
-          String msg = "Failed to call isSecurityEnabled()";
-          LOG.info(msg, e);
-          throw new IllegalStateException(msg,e);
-      }
-      return false;
-  }
-
-  // a signature string to associate with a HCatTableInfo - essentially
-  // a concatenation of dbname, tablename and partition keyvalues.
-  String getTokenSignature(OutputJobInfo outputJobInfo) {
-    StringBuilder result = new StringBuilder("");
-    String dbName = outputJobInfo.getDatabaseName();
-    if(dbName != null) {
-      result.append(dbName);
+    // making sure this is not initialized unless needed
+    private static final class LazyHolder {
+        public static final Security INSTANCE = new Security();
     }
-    String tableName = outputJobInfo.getTableName();
-    if(tableName != null) {
-      result.append("." + tableName);
+
+    public static Security getInstance() {
+        return LazyHolder.INSTANCE;
     }
-    Map<String, String> partValues = outputJobInfo.getPartitionValues();
-    if(partValues != null) {
-      for(Entry<String, String> entry: partValues.entrySet()) {
-        result.append("/");
-        result.append(entry.getKey());
-        result.append("=");
-        result.append(entry.getValue());
-      }
 
-    }
-    return result.toString();
-  }
-
-  void handleSecurity(
-      Job job, 
-      OutputJobInfo outputJobInfo,
-      HiveMetaStoreClient client, 
-      Configuration conf,
-      boolean harRequested)
-      throws IOException, MetaException, TException, Exception {
-    if(UserGroupInformation.isSecurityEnabled()){
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      // check if oozie has set up a hcat deleg. token - if so use it
-      TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
-      //Oozie does not change the service field of the token
-      //hence by default token generation will have a value of "new Text("")"
-      //HiveClient will look for a use TokenSelector.selectToken() with service
-      //set to empty "Text" if hive.metastore.token.signature property is set to null
-      Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(
-        new Text(), ugi.getTokens());
-      if(hiveToken == null) {
-        // we did not get token set up by oozie, let's get them ourselves here.
-        // we essentially get a token per unique Output HCatTableInfo - this is
-        // done because through Pig, setOutput() method is called multiple times
-        // We want to only get the token once per unique output HCatTableInfo -
-        // we cannot just get one token since in multi-query case (> 1 store in 1 job)
-        // or the case when a single pig script results in > 1 jobs, the single
-        // token will get cancelled by the output committer and the subsequent
-        // stores will fail - by tying the token with the concatenation of
-        // dbname, tablename and partition keyvalues of the output
-        // TableInfo, we can have as many tokens as there are stores and the TokenSelector
-        // will correctly pick the right tokens which the committer will use and
-        // cancel.
-        String tokenSignature = getTokenSignature(outputJobInfo);
-        // get delegation tokens from hcat server and store them into the "job"
-        // These will be used in to publish partitions to
-        // hcat normally in OutputCommitter.commitJob()
-        // when the JobTracker in Hadoop MapReduce starts supporting renewal of
-        // arbitrary tokens, the renewer should be the principal of the JobTracker
-        hiveToken = HCatUtil.extractThriftToken(client.getDelegationToken(ugi.getUserName()), tokenSignature);
-
-        if (harRequested){
-          TokenSelector<? extends TokenIdentifier> jtTokenSelector =
-            new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
-          Token jtToken = jtTokenSelector.selectToken(org.apache.hadoop.security.SecurityUtil.buildTokenService(
-                      HCatHadoopShims.Instance.get().getResourceManagerAddress(conf)), ugi.getTokens());
-          if(jtToken == null) {
-            //we don't need to cancel this token as the TokenRenewer for JT tokens
-            //takes care of cancelling them
-            job.getCredentials().addToken(new Text("hcat jt token"),
-                HCatUtil.getJobTrackerDelegationToken(conf,ugi.getUserName()));
-          }
+    boolean isSecurityEnabled() {
+        try {
+            Method m = UserGroupInformation.class.getMethod("isSecurityEnabled");
+            return (Boolean) m.invoke(null, (Object[]) null);
+        } catch (NoSuchMethodException e) {
+            LOG.info("Security is not supported by this version of hadoop.", e);
+        } catch (InvocationTargetException e) {
+            String msg = "Failed to call isSecurityEnabled()";
+            LOG.info(msg, e);
+            throw new IllegalStateException(msg, e);
+        } catch (IllegalAccessException e) {
+            String msg = "Failed to call isSecurityEnabled()";
+            LOG.info(msg, e);
+            throw new IllegalStateException(msg, e);
         }
-        
-        job.getCredentials().addToken(new Text(ugi.getUserName() +"_"+ tokenSignature), hiveToken);
-        // this will be used by the outputcommitter to pass on to the metastore client
-        // which in turn will pass on to the TokenSelector so that it can select
-        // the right token.
-        job.getConfiguration().set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
-      }
+        return false;
     }
-  }
 
-  // we should cancel hcat token if it was acquired by hcat
-  // and not if it was supplied (ie Oozie). In the latter
-  // case the HCAT_KEY_TOKEN_SIGNATURE property in the conf will not be set
-  void cancelToken(HiveMetaStoreClient client, JobContext context) throws IOException, MetaException {
-    String tokenStrForm = client.getTokenStrForm();
-    if(tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
-      try {
-        client.cancelDelegationToken(tokenStrForm);
-      } catch (TException e) {
-        String msg = "Failed to cancel delegation token";
-        LOG.error(msg,e);
-        throw new IOException(msg,e);
-      }
+    // a signature string to associate with a HCatTableInfo - essentially
+    // a concatenation of dbname, tablename and partition keyvalues.
+    String getTokenSignature(OutputJobInfo outputJobInfo) {
+        StringBuilder result = new StringBuilder("");
+        String dbName = outputJobInfo.getDatabaseName();
+        if (dbName != null) {
+            result.append(dbName);
+        }
+        String tableName = outputJobInfo.getTableName();
+        if (tableName != null) {
+            result.append("." + tableName);
+        }
+        Map<String, String> partValues = outputJobInfo.getPartitionValues();
+        if (partValues != null) {
+            for (Entry<String, String> entry : partValues.entrySet()) {
+                result.append("/");
+                result.append(entry.getKey());
+                result.append("=");
+                result.append(entry.getValue());
+            }
+
+        }
+        return result.toString();
     }
-  }
+
+    void handleSecurity(
+        Job job,
+        OutputJobInfo outputJobInfo,
+        HiveMetaStoreClient client,
+        Configuration conf,
+        boolean harRequested)
+        throws IOException, MetaException, TException, Exception {
+        if (UserGroupInformation.isSecurityEnabled()) {
+            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+            // check if oozie has set up a hcat deleg. token - if so use it
+            TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
+            //Oozie does not change the service field of the token
+            //hence by default token generation will have a value of "new Text("")"
+            //HiveClient will use TokenSelector.selectToken() with the service
+            //set to an empty "Text" if the hive.metastore.token.signature property is set to null
+            Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(
+                new Text(), ugi.getTokens());
+            if (hiveToken == null) {
+                // we did not get a token set up by Oozie, so let's get one ourselves here.
+                // we essentially get a token per unique output HCatTableInfo - this is
+                // done because through Pig the setOutput() method is called multiple times.
+                // We want to get the token only once per unique output HCatTableInfo -
+                // we cannot just get one token, since in the multi-query case (> 1 store in 1 job),
+                // or when a single Pig script results in > 1 jobs, the single
+                // token would get cancelled by the output committer and the subsequent
+                // stores would fail - by tying the token to the concatenation of
+                // dbname, tablename and partition key-values of the output
+                // TableInfo, we can have as many tokens as there are stores, and the TokenSelector
+                // will correctly pick the right tokens, which the committer will use and
+                // cancel.
+                String tokenSignature = getTokenSignature(outputJobInfo);
+                // get delegation tokens from hcat server and store them into the "job"
+                // These will be used to publish partitions to
+                // hcat normally in OutputCommitter.commitJob()
+                // when the JobTracker in Hadoop MapReduce starts supporting renewal of
+                // arbitrary tokens, the renewer should be the principal of the JobTracker
+                hiveToken = HCatUtil.extractThriftToken(client.getDelegationToken(ugi.getUserName()), tokenSignature);
+
+                if (harRequested) {
+                    TokenSelector<? extends TokenIdentifier> jtTokenSelector =
+                        new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
+                    Token jtToken = jtTokenSelector.selectToken(org.apache.hadoop.security.SecurityUtil.buildTokenService(
+                        HCatHadoopShims.Instance.get().getResourceManagerAddress(conf)), ugi.getTokens());
+                    if (jtToken == null) {
+                        //we don't need to cancel this token as the TokenRenewer for JT tokens
+                        //takes care of cancelling them
+                        job.getCredentials().addToken(new Text("hcat jt token"),
+                            HCatUtil.getJobTrackerDelegationToken(conf, ugi.getUserName()));
+                    }
+                }
+
+                job.getCredentials().addToken(new Text(ugi.getUserName() + "_" + tokenSignature), hiveToken);
+                // this will be used by the outputcommitter to pass on to the metastore client
+                // which in turn will pass on to the TokenSelector so that it can select
+                // the right token.
+                job.getConfiguration().set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
+            }
+        }
+    }
+
+    // we should cancel the hcat token if it was acquired by hcat
+    // and not if it was supplied (i.e. by Oozie). In the latter
+    // case the HCAT_KEY_TOKEN_SIGNATURE property in the conf will not be set
+    void cancelToken(HiveMetaStoreClient client, JobContext context) throws IOException, MetaException {
+        String tokenStrForm = client.getTokenStrForm();
+        if (tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
+            try {
+                client.cancelDelegationToken(tokenStrForm);
+            } catch (TException e) {
+                String msg = "Failed to cancel delegation token";
+                LOG.error(msg, e);
+                throw new IOException(msg, e);
+            }
+        }
+    }
 
 }
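
Note: Security.getTokenSignature() keys each delegation token on the database name, table name and partition key-values, so a multi-store Pig job holds one token per output and one committer cannot cancel another's token. A standalone sketch of that signature format (the sample database, table and partition values are made up):

import java.util.LinkedHashMap;
import java.util.Map;

public class TokenSignatureSketch {

    /** dbname, then ".tablename", then "/key=value" per partition value - as in getTokenSignature(). */
    static String signature(String dbName, String tableName, Map<String, String> partValues) {
        StringBuilder result = new StringBuilder();
        if (dbName != null) {
            result.append(dbName);
        }
        if (tableName != null) {
            result.append(".").append(tableName);
        }
        if (partValues != null) {
            for (Map.Entry<String, String> entry : partValues.entrySet()) {
                result.append("/").append(entry.getKey()).append("=").append(entry.getValue());
            }
        }
        return result.toString();
    }

    public static void main(String[] args) {
        Map<String, String> partValues = new LinkedHashMap<String, String>();
        partValues.put("ds", "2012-09-10");
        partValues.put("region", "us");
        // prints: mydb.weblogs/ds=2012-09-10/region=us
        System.out.println(signature("mydb", "weblogs", partValues));
    }
}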
diff --git a/src/java/org/apache/hcatalog/mapreduce/StorerInfo.java b/src/java/org/apache/hcatalog/mapreduce/StorerInfo.java
index e404b61..57ec4c3 100644
--- a/src/java/org/apache/hcatalog/mapreduce/StorerInfo.java
+++ b/src/java/org/apache/hcatalog/mapreduce/StorerInfo.java
@@ -47,12 +47,12 @@
      * @param properties the properties for the storage handler
      */
     public StorerInfo(String ifClass, String ofClass, String serdeClass, String storageHandlerClass, Properties properties) {
-      super();
-      this.ifClass =ifClass;
-      this.ofClass = ofClass;
-      this.serdeClass = serdeClass;
-      this.storageHandlerClass = storageHandlerClass;
-      this.properties = properties;
+        super();
+        this.ifClass = ifClass;
+        this.ofClass = ofClass;
+        this.serdeClass = serdeClass;
+        this.storageHandlerClass = storageHandlerClass;
+        this.properties = properties;
     }
 
     /**
@@ -94,14 +94,14 @@
      * @return the storer properties
      */
     public Properties getProperties() {
-      return properties;
+        return properties;
     }
 
     /**
      * @param properties the storer properties to set 
      */
     public void setProperties(Properties properties) {
-      this.properties = properties;
+        this.properties = properties;
     }
 
 
diff --git a/src/java/org/apache/hcatalog/oozie/JavaAction.java b/src/java/org/apache/hcatalog/oozie/JavaAction.java
index ef20ce5..f45f0e3 100644
--- a/src/java/org/apache/hcatalog/oozie/JavaAction.java
+++ b/src/java/org/apache/hcatalog/oozie/JavaAction.java
@@ -27,14 +27,14 @@
 
 public class JavaAction {
 
-  public static void main(String[] args) throws Exception{
+    public static void main(String[] args) throws Exception {
 
-    HiveConf conf = new HiveConf();
-    conf.addResource(new Path("file:///", System.getProperty("oozie.action.conf.xml")));
-    conf.setVar(ConfVars.SEMANTIC_ANALYZER_HOOK, HCatSemanticAnalyzer.class.getName());
-    conf.setBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL, true);
-    SessionState.start(new CliSessionState(conf));
-    new CliDriver().processLine(args[0]);
-  }
+        HiveConf conf = new HiveConf();
+        conf.addResource(new Path("file:///", System.getProperty("oozie.action.conf.xml")));
+        conf.setVar(ConfVars.SEMANTIC_ANALYZER_HOOK, HCatSemanticAnalyzer.class.getName());
+        conf.setBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL, true);
+        SessionState.start(new CliSessionState(conf));
+        new CliDriver().processLine(args[0]);
+    }
 
 }
diff --git a/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java b/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java
index 6ca0cd8..5301530 100644
--- a/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java
+++ b/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java
@@ -29,22 +29,21 @@
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 
-public class RCFileMapReduceInputFormat<K extends LongWritable,V extends BytesRefArrayWritable>
-extends FileInputFormat<LongWritable, BytesRefArrayWritable>
-{
+public class RCFileMapReduceInputFormat<K extends LongWritable, V extends BytesRefArrayWritable>
+    extends FileInputFormat<LongWritable, BytesRefArrayWritable> {
 
-  @Override
-  public RecordReader<LongWritable,BytesRefArrayWritable> createRecordReader(InputSplit split,
-      TaskAttemptContext context) throws IOException, InterruptedException {
+    @Override
+    public RecordReader<LongWritable, BytesRefArrayWritable> createRecordReader(InputSplit split,
+                                                                                TaskAttemptContext context) throws IOException, InterruptedException {
 
-    context.setStatus(split.toString());
-    return new RCFileMapReduceRecordReader<LongWritable,BytesRefArrayWritable>();
-  }
+        context.setStatus(split.toString());
+        return new RCFileMapReduceRecordReader<LongWritable, BytesRefArrayWritable>();
+    }
 
-  @Override
-  public List<InputSplit> getSplits(JobContext job) throws IOException {
+    @Override
+    public List<InputSplit> getSplits(JobContext job) throws IOException {
 
-    job.getConfiguration().setLong("mapred.min.split.size", SequenceFile.SYNC_INTERVAL);
-    return super.getSplits(job);
-  }
+        job.getConfiguration().setLong("mapred.min.split.size", SequenceFile.SYNC_INTERVAL);
+        return super.getSplits(job);
+    }
 }
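
Note: RCFileMapReduceInputFormat pins mapred.min.split.size to the RCFile sync interval and hands back LongWritable/BytesRefArrayWritable pairs. A hedged sketch of a map-only driver that could consume it, assuming the Hadoop 1.x-era new-API Job constructor and hypothetical input/output paths:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hcatalog.rcfile.RCFileMapReduceInputFormat;

public class RCFileRowCount {

    /** counts the rows handed out by RCFileMapReduceRecordReader; emits a single total per map task */
    static class RowCountMapper extends Mapper<LongWritable, BytesRefArrayWritable, Text, LongWritable> {
        private long rows = 0;

        @Override
        protected void map(LongWritable key, BytesRefArrayWritable value, Context context) {
            rows++;    // each value holds one row's columns as byte ranges
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            context.write(new Text("rows"), new LongWritable(rows));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "rcfile-row-count");
        job.setJarByClass(RCFileRowCount.class);
        job.setInputFormatClass(RCFileMapReduceInputFormat.class);
        job.setMapperClass(RowCountMapper.class);
        job.setNumReduceTasks(0);                                 // map-only
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));     // existing RCFile data
        FileOutputFormat.setOutputPath(job, new Path(args[1]));   // must not exist yet
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}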
diff --git a/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java b/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java
index 792d7a2..665b2dc 100644
--- a/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java
+++ b/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java
@@ -39,66 +39,66 @@
 public class RCFileMapReduceOutputFormat extends
     FileOutputFormat<WritableComparable<?>, BytesRefArrayWritable> {
 
-  /**
-   * Set number of columns into the given configuration.
-   * @param conf
-   *          configuration instance which need to set the column number
-   * @param columnNum
-   *          column number for RCFile's Writer
-   *
-   */
-  public static void setColumnNumber(Configuration conf, int columnNum) {
-    assert columnNum > 0;
-    conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNum);
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.mapreduce.lib.output.FileOutputFormat#getRecordWriter(org.apache.hadoop.mapreduce.TaskAttemptContext)
-   */
-  @Override
-  public org.apache.hadoop.mapreduce.RecordWriter<WritableComparable<?>, BytesRefArrayWritable> getRecordWriter(
-      TaskAttemptContext task) throws IOException, InterruptedException {
-
-    //FileOutputFormat.getWorkOutputPath takes TaskInputOutputContext instead of
-    //TaskAttemptContext, so can't use that here
-    FileOutputCommitter committer = (FileOutputCommitter) getOutputCommitter(task);
-    Path outputPath = committer.getWorkPath();
-
-    FileSystem fs = outputPath.getFileSystem(task.getConfiguration());
-
-    if (!fs.exists(outputPath)) {
-      fs.mkdirs(outputPath);
+    /**
+     * Set number of columns into the given configuration.
+     * @param conf
+     *          configuration instance on which to set the column number
+     * @param columnNum
+     *          column number for RCFile's Writer
+     *
+     */
+    public static void setColumnNumber(Configuration conf, int columnNum) {
+        assert columnNum > 0;
+        conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNum);
     }
 
-    Path file = getDefaultWorkFile(task, "");
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.mapreduce.lib.output.FileOutputFormat#getRecordWriter(org.apache.hadoop.mapreduce.TaskAttemptContext)
+     */
+    @Override
+    public org.apache.hadoop.mapreduce.RecordWriter<WritableComparable<?>, BytesRefArrayWritable> getRecordWriter(
+        TaskAttemptContext task) throws IOException, InterruptedException {
 
-    CompressionCodec codec = null;
-    if (getCompressOutput(task)) {
-      Class<?> codecClass = getOutputCompressorClass(task, DefaultCodec.class);
-      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, task.getConfiguration());
+        //FileOutputFormat.getWorkOutputPath takes TaskInputOutputContext instead of
+        //TaskAttemptContext, so can't use that here
+        FileOutputCommitter committer = (FileOutputCommitter) getOutputCommitter(task);
+        Path outputPath = committer.getWorkPath();
+
+        FileSystem fs = outputPath.getFileSystem(task.getConfiguration());
+
+        if (!fs.exists(outputPath)) {
+            fs.mkdirs(outputPath);
+        }
+
+        Path file = getDefaultWorkFile(task, "");
+
+        CompressionCodec codec = null;
+        if (getCompressOutput(task)) {
+            Class<?> codecClass = getOutputCompressorClass(task, DefaultCodec.class);
+            codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, task.getConfiguration());
+        }
+
+        final RCFile.Writer out = new RCFile.Writer(fs, task.getConfiguration(), file, task, codec);
+
+        return new RecordWriter<WritableComparable<?>, BytesRefArrayWritable>() {
+
+            /* (non-Javadoc)
+             * @see org.apache.hadoop.mapreduce.RecordWriter#write(java.lang.Object, java.lang.Object)
+             */
+            @Override
+            public void write(WritableComparable<?> key, BytesRefArrayWritable value)
+                throws IOException {
+                out.append(value);
+            }
+
+            /* (non-Javadoc)
+             * @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext)
+             */
+            @Override
+            public void close(TaskAttemptContext task) throws IOException, InterruptedException {
+                out.close();
+            }
+        };
     }
 
-    final RCFile.Writer out = new RCFile.Writer(fs, task.getConfiguration(), file, task, codec);
-
-    return new RecordWriter<WritableComparable<?>, BytesRefArrayWritable>() {
-
-      /* (non-Javadoc)
-       * @see org.apache.hadoop.mapreduce.RecordWriter#write(java.lang.Object, java.lang.Object)
-       */
-      @Override
-      public void write(WritableComparable<?> key, BytesRefArrayWritable value)
-          throws IOException {
-        out.append(value);
-      }
-
-      /* (non-Javadoc)
-       * @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext)
-       */
-      @Override
-      public void close(TaskAttemptContext task) throws IOException, InterruptedException {
-        out.close();
-      }
-    };
-  }
-
 }
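
Note: a writer job using RCFileMapReduceOutputFormat has to publish the column count through setColumnNumber() and emit each row as a BytesRefArrayWritable. A small sketch of those two pieces, with a hypothetical three-column row; the surrounding job wiring is omitted:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;
import org.apache.hcatalog.rcfile.RCFileMapReduceOutputFormat;

public class RCFileWriteSetup {
    static final int NUM_COLUMNS = 3;

    /** RCFile's Writer reads the column count from the configuration, so set it before the job runs. */
    static void configure(Configuration conf) {
        RCFileMapReduceOutputFormat.setColumnNumber(conf, NUM_COLUMNS);
    }

    /** pack one row: each column becomes a byte range inside a BytesRefArrayWritable */
    static BytesRefArrayWritable toRow(String... columns) {
        BytesRefArrayWritable row = new BytesRefArrayWritable(columns.length);
        for (int i = 0; i < columns.length; i++) {
            byte[] bytes = columns[i].getBytes();
            row.set(i, new BytesRefWritable(bytes, 0, bytes.length));
        }
        return row;
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        configure(conf);
        BytesRefArrayWritable row = toRow("1", "alice", "us");
        System.out.println("columns in row: " + row.size());   // 3
        // in a real job the row would be written as the output value, e.g.
        // context.write(NullWritable.get(), row), with RCFileMapReduceOutputFormat as the output format
    }
}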
diff --git a/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java b/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java
index 7a30eaa..126866a 100644
--- a/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java
+++ b/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java
@@ -31,90 +31,90 @@
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
 
 public class RCFileMapReduceRecordReader<K extends LongWritable, V extends BytesRefArrayWritable>
-  extends RecordReader<LongWritable,BytesRefArrayWritable>{
+    extends RecordReader<LongWritable, BytesRefArrayWritable> {
 
-  private Reader in;
-  private long start;
-  private long end;
-  private boolean more = true;
+    private Reader in;
+    private long start;
+    private long end;
+    private boolean more = true;
 
-  // key and value objects are created once in initialize() and then reused
-  // for every getCurrentKey() and getCurrentValue() call. This is important
-  // since RCFile makes an assumption of this fact.
+    // key and value objects are created once in initialize() and then reused
+    // for every getCurrentKey() and getCurrentValue() call. This is important
+    // since RCFile makes an assumption of this fact.
 
-  private LongWritable key;
-  private BytesRefArrayWritable value;
+    private LongWritable key;
+    private BytesRefArrayWritable value;
 
-  @Override
-  public void close() throws IOException {
-    in.close();
-  }
-
-  @Override
-  public LongWritable getCurrentKey() throws IOException, InterruptedException {
-    return key;
-  }
-
-  @Override
-  public BytesRefArrayWritable getCurrentValue() throws IOException, InterruptedException {
-    return value;
-  }
-
-  @Override
-  public float getProgress() throws IOException, InterruptedException {
-    if (end == start) {
-      return 0.0f;
-    } else {
-      return Math.min(1.0f, (in.getPosition() - start) / (float) (end - start));
-    }
-  }
-
-  @Override
-  public boolean nextKeyValue() throws IOException, InterruptedException {
-
-    more = next(key);
-    if (more) {
-      in.getCurrentRow(value);
+    @Override
+    public void close() throws IOException {
+        in.close();
     }
 
-    return more;
-  }
-
-  private boolean next(LongWritable key) throws IOException {
-    if (!more) {
-      return false;
+    @Override
+    public LongWritable getCurrentKey() throws IOException, InterruptedException {
+        return key;
     }
 
-    more = in.next(key);
-    if (!more) {
-      return false;
+    @Override
+    public BytesRefArrayWritable getCurrentValue() throws IOException, InterruptedException {
+        return value;
     }
 
-    if (in.lastSeenSyncPos() >= end) {
-      more = false;
-      return more;
-    }
-    return more;
-  }
-
-  @Override
-  public void initialize(InputSplit split, TaskAttemptContext context) throws IOException,
-      InterruptedException {
-
-    FileSplit fSplit = (FileSplit)split;
-    Path path = fSplit.getPath();
-    Configuration conf = context.getConfiguration();
-    this.in = new RCFile.Reader(path.getFileSystem(conf), path, conf);
-    this.end = fSplit.getStart() + fSplit.getLength();
-
-    if(fSplit.getStart() > in.getPosition()) {
-      in.sync(fSplit.getStart());
+    @Override
+    public float getProgress() throws IOException, InterruptedException {
+        if (end == start) {
+            return 0.0f;
+        } else {
+            return Math.min(1.0f, (in.getPosition() - start) / (float) (end - start));
+        }
     }
 
-    this.start = in.getPosition();
-    more = start < end;
+    @Override
+    public boolean nextKeyValue() throws IOException, InterruptedException {
 
-    key = new LongWritable();
-    value = new BytesRefArrayWritable();
-  }
+        more = next(key);
+        if (more) {
+            in.getCurrentRow(value);
+        }
+
+        return more;
+    }
+
+    private boolean next(LongWritable key) throws IOException {
+        if (!more) {
+            return false;
+        }
+
+        more = in.next(key);
+        if (!more) {
+            return false;
+        }
+
+        if (in.lastSeenSyncPos() >= end) {
+            more = false;
+            return more;
+        }
+        return more;
+    }
+
+    @Override
+    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException,
+        InterruptedException {
+
+        FileSplit fSplit = (FileSplit) split;
+        Path path = fSplit.getPath();
+        Configuration conf = context.getConfiguration();
+        this.in = new RCFile.Reader(path.getFileSystem(conf), path, conf);
+        this.end = fSplit.getStart() + fSplit.getLength();
+
+        if (fSplit.getStart() > in.getPosition()) {
+            in.sync(fSplit.getStart());
+        }
+
+        this.start = in.getPosition();
+        more = start < end;
+
+        key = new LongWritable();
+        value = new BytesRefArrayWritable();
+    }
 }
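
Note: RCFileMapReduceRecordReader confines itself to its split by syncing to the first sync point at or after the split start and stopping once the last seen sync position passes the split end. A hedged standalone sketch that mirrors that logic with RCFile.Reader directly (the path and split offsets come from the command line and are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.RCFile;
import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
import org.apache.hadoop.io.LongWritable;

public class RCFileSplitScan {
    public static void main(String[] args) throws Exception {
        Path path = new Path(args[0]);
        long splitStart = Long.parseLong(args[1]);
        long splitLength = Long.parseLong(args[2]);

        Configuration conf = new Configuration();
        FileSystem fs = path.getFileSystem(conf);
        RCFile.Reader in = new RCFile.Reader(fs, path, conf);
        long end = splitStart + splitLength;

        // jump to the first sync point at or after the split start, as initialize() does
        if (splitStart > in.getPosition()) {
            in.sync(splitStart);
        }
        long start = in.getPosition();

        LongWritable key = new LongWritable();
        BytesRefArrayWritable value = new BytesRefArrayWritable();
        long rows = 0;
        if (start < end) {
            while (in.next(key)) {
                // rows after the split's last sync marker belong to the next split's reader
                if (in.lastSeenSyncPos() >= end) {
                    break;
                }
                in.getCurrentRow(value);
                rows++;
            }
        }
        in.close();
        System.out.println("rows read from this split: " + rows);
    }
}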
diff --git a/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java b/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java
index a7c3311..cbafa0a 100644
--- a/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java
+++ b/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java
@@ -48,272 +48,281 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
-/** 
+/**
  * An AuthorizationProvider, which checks against the data access level permissions on HDFS.
  * It makes sense to eventually move this class to Hive, so that all hive users can
  * use this authorization model. 
  */
 public class HdfsAuthorizationProvider extends HiveAuthorizationProviderBase {
 
-  protected Warehouse wh;
-  
-  //Config variables : create an enum to store them if we have more
-  private static final String PROXY_USER_NAME = "proxy.user.name";
+    protected Warehouse wh;
 
-  public HdfsAuthorizationProvider() {
-    super();
-  }
-  
-  public HdfsAuthorizationProvider(Configuration conf) {
-    super();
-    setConf(conf);
-  }
-  
-  @Override
-  public void setConf(Configuration conf) {
-    super.setConf(conf);
-    try {
-      this.wh = new Warehouse(conf);
-    } catch (MetaException ex) {
-      throw new RuntimeException(ex);
-    }
-  }
-  
-  protected FsAction getFsAction(Privilege priv, Path path) {
-    
-    switch (priv.getPriv()) {
-    case ALL            : throw new AuthorizationException("no matching Action for Privilege.All");
-    case ALTER_DATA     : return FsAction.WRITE;   
-    case ALTER_METADATA : return FsAction.WRITE;  
-    case CREATE         : return FsAction.WRITE;
-    case DROP           : return FsAction.WRITE;
-    case INDEX          : return FsAction.WRITE;
-    case LOCK           : return FsAction.WRITE;
-    case SELECT         : return FsAction.READ;
-    case SHOW_DATABASE  : return FsAction.READ;
-    case UNKNOWN        : 
-    default             : throw new AuthorizationException("Unknown privilege");
-    }
-  }
-  
-  protected EnumSet<FsAction> getFsActions(Privilege[] privs, Path path) {
-    EnumSet<FsAction> actions = EnumSet.noneOf(FsAction.class);
-    
-    if (privs == null) {
-      return actions;
-    }
-    
-    for (Privilege priv : privs) {
-      actions.add(getFsAction(priv, path));
-    }
-    
-    return actions;
-  }
-  
-  private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
+    //Config variables : create an enum to store them if we have more
+    private static final String PROXY_USER_NAME = "proxy.user.name";
 
-  private Path getDefaultDatabasePath(String dbName) throws MetaException {
-    if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
-      return wh.getWhRoot();
-    }
-    return new Path(wh.getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
-  }
-  
-  protected Path getDbLocation(Database db) throws HiveException {
-    try {
-      String location = db.getLocationUri();
-      if (location == null) {
-        return getDefaultDatabasePath(db.getName());
-      } else {
-        return wh.getDnsPath(wh.getDatabasePath(db));
-      }
-    } catch (MetaException ex) {
-      throw new HiveException(ex.getMessage());
-    }
-  }
-  
-  @Override
-  public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    //Authorize for global level permissions at the warehouse dir
-    Path root;
-    try {
-      root = wh.getWhRoot();
-      authorize(root, readRequiredPriv, writeRequiredPriv);
-    } catch (MetaException ex) {
-      throw new HiveException(ex);
-    }
-  }
-
-  @Override
-  public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    if (db == null) {
-      return;
+    public HdfsAuthorizationProvider() {
+        super();
     }
 
-    Path path = getDbLocation(db);
-    
-    authorize(path, readRequiredPriv, writeRequiredPriv);
-  }
-
-  @Override
-  public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    if (table == null) {
-      return;
-    }
-    
-    //unlike Hive's model, this can be called at CREATE TABLE as well, since we should authorize 
-    //against the table's declared location
-    Path path = null;
-    try {
-      if (table.getTTable().getSd().getLocation() == null
-          || table.getTTable().getSd().getLocation().isEmpty()) {
-            path = wh.getTablePath(hive_db.getDatabase(table.getDbName()), table.getTableName());
-      } else {
-         path = table.getPath();
-      }
-    } catch (MetaException ex) {
-      throw new HiveException(ex);
-    }
-    
-    authorize(path, readRequiredPriv, writeRequiredPriv);
-  }
-
-  //TODO: HiveAuthorizationProvider should expose this interface instead of #authorize(Partition, Privilege[], Privilege[])
-  public void authorize(Table table, Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    
-    if (part == null || part.getLocation() == null) {
-      authorize(table, readRequiredPriv, writeRequiredPriv);
-    } else {
-      authorize(part.getPartitionPath(), readRequiredPriv, writeRequiredPriv);
-    }
-  }
-
-  @Override
-  public void authorize(Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    if (part == null) {
-      return;
-    }
-    authorize(part.getTable(), part, readRequiredPriv, writeRequiredPriv);
-  }
-
-  @Override
-  public void authorize(Table table, Partition part, List<String> columns,
-      Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
-      AuthorizationException {
-    //columns cannot live in different files, just check for partition level permissions
-    authorize(table, part, readRequiredPriv, writeRequiredPriv);
-  }
-  
-  /** 
-   * Authorization privileges against a path.
-   * @param path a filesystem path
-   * @param readRequiredPriv a list of privileges needed for inputs.
-   * @param writeRequiredPriv a list of privileges needed for outputs.
-   */
-  public void authorize(Path path, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) 
-      throws HiveException, AuthorizationException {
-    try {
-      EnumSet<FsAction> actions = getFsActions(readRequiredPriv, path);
-      actions.addAll(getFsActions(writeRequiredPriv, path));
-      if (actions.isEmpty()) {
-        return;
-      }
-      
-      checkPermissions(getConf(), path, actions);
-      
-    } catch (AccessControlException ex) {
-      throw new AuthorizationException(ex);
-    } catch (LoginException ex) {
-      throw new AuthorizationException(ex);
-    } catch (IOException ex) {
-      throw new HiveException(ex);
-    }
-  }
-  
-  /**
-   * Checks the permissions for the given path and current user on Hadoop FS. If the given path 
-   * does not exists, it checks for it's parent folder.
-   */
-  protected static void checkPermissions(final Configuration conf, final Path path, 
-      final EnumSet<FsAction> actions) throws IOException, LoginException {
-
-    if (path == null) {
-      throw new IllegalArgumentException("path is null");
+    public HdfsAuthorizationProvider(Configuration conf) {
+        super();
+        setConf(conf);
     }
 
-    HadoopShims shims = ShimLoader.getHadoopShims();
-    final UserGroupInformation ugi;
-    if(conf.get(PROXY_USER_NAME) != null){
-        ugi = UserGroupInformation.createRemoteUser(conf.get(PROXY_USER_NAME));
-    }
-    else {
-        ugi = shims.getUGIForConf(conf);
-    }
-    final String user = shims.getShortUserName(ugi);  
-        
-    final FileSystem fs = path.getFileSystem(conf);
-
-    if (fs.exists(path)) {
-      checkPermissions(fs, path, actions, user, ugi.getGroupNames());
-    } else if (path.getParent() != null) {
-      // find the ancestor which exists to check it's permissions
-      Path par = path.getParent();
-      while (par != null) {
-        if (fs.exists(par)) {
-          break;
+    @Override
+    public void setConf(Configuration conf) {
+        super.setConf(conf);
+        try {
+            this.wh = new Warehouse(conf);
+        } catch (MetaException ex) {
+            throw new RuntimeException(ex);
         }
-        par = par.getParent();
-      }
-
-      checkPermissions(fs, par, actions, user, ugi.getGroupNames());
-    }
-  }
-  
-  /**
-   * Checks the permissions for the given path and current user on Hadoop FS. If the given path 
-   * does not exists, it returns.
-   */
-  @SuppressWarnings("deprecation")
-  protected static void checkPermissions(final FileSystem fs, final Path path,
-      final EnumSet<FsAction> actions, String user, String[] groups) throws IOException,
-      AccessControlException {
-    
-    final FileStatus stat;
-
-    try {
-      stat = fs.getFileStatus(path);
-    } catch (FileNotFoundException fnfe) {
-      // File named by path doesn't exist; nothing to validate.
-      return;
-    } catch (org.apache.hadoop.fs.permission.AccessControlException ace) {
-      // Older hadoop version will throw this @deprecated Exception.
-      throw new AccessControlException(ace.getMessage());
     }
 
-    final FsPermission dirPerms = stat.getPermission();
-    final String grp = stat.getGroup();
+    protected FsAction getFsAction(Privilege priv, Path path) {
 
-    for (FsAction action : actions) {
-      if (user.equals(stat.getOwner())) {
-        if (dirPerms.getUserAction().implies(action)) {
-          continue;
+        switch (priv.getPriv()) {
+        case ALL:
+            throw new AuthorizationException("no matching Action for Privilege.All");
+        case ALTER_DATA:
+            return FsAction.WRITE;
+        case ALTER_METADATA:
+            return FsAction.WRITE;
+        case CREATE:
+            return FsAction.WRITE;
+        case DROP:
+            return FsAction.WRITE;
+        case INDEX:
+            return FsAction.WRITE;
+        case LOCK:
+            return FsAction.WRITE;
+        case SELECT:
+            return FsAction.READ;
+        case SHOW_DATABASE:
+            return FsAction.READ;
+        case UNKNOWN:
+        default:
+            throw new AuthorizationException("Unknown privilege");
         }
-      }
-      if (ArrayUtils.contains(groups, grp)) {
-        if (dirPerms.getGroupAction().implies(action)) {
-          continue;
-        }
-      }
-      if (dirPerms.getOtherAction().implies(action)) {
-        continue;
-      }
-      throw new AccessControlException("action " + action + " not permitted on path " 
-          + path + " for user " + user);
     }
-  }
+
+    protected EnumSet<FsAction> getFsActions(Privilege[] privs, Path path) {
+        EnumSet<FsAction> actions = EnumSet.noneOf(FsAction.class);
+
+        if (privs == null) {
+            return actions;
+        }
+
+        for (Privilege priv : privs) {
+            actions.add(getFsAction(priv, path));
+        }
+
+        return actions;
+    }
+
+    private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
+
+    private Path getDefaultDatabasePath(String dbName) throws MetaException {
+        if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
+            return wh.getWhRoot();
+        }
+        return new Path(wh.getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
+    }
+
+    protected Path getDbLocation(Database db) throws HiveException {
+        try {
+            String location = db.getLocationUri();
+            if (location == null) {
+                return getDefaultDatabasePath(db.getName());
+            } else {
+                return wh.getDnsPath(wh.getDatabasePath(db));
+            }
+        } catch (MetaException ex) {
+            throw new HiveException(ex.getMessage());
+        }
+    }
+
+    @Override
+    public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        //Authorize for global level permissions at the warehouse dir
+        Path root;
+        try {
+            root = wh.getWhRoot();
+            authorize(root, readRequiredPriv, writeRequiredPriv);
+        } catch (MetaException ex) {
+            throw new HiveException(ex);
+        }
+    }
+
+    @Override
+    public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        if (db == null) {
+            return;
+        }
+
+        Path path = getDbLocation(db);
+
+        authorize(path, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        if (table == null) {
+            return;
+        }
+
+        //unlike Hive's model, this can be called at CREATE TABLE as well, since we should authorize
+        //against the table's declared location
+        Path path = null;
+        try {
+            if (table.getTTable().getSd().getLocation() == null
+                || table.getTTable().getSd().getLocation().isEmpty()) {
+                path = wh.getTablePath(hive_db.getDatabase(table.getDbName()), table.getTableName());
+            } else {
+                path = table.getPath();
+            }
+        } catch (MetaException ex) {
+            throw new HiveException(ex);
+        }
+
+        authorize(path, readRequiredPriv, writeRequiredPriv);
+    }
+
+    //TODO: HiveAuthorizationProvider should expose this interface instead of #authorize(Partition, Privilege[], Privilege[])
+    public void authorize(Table table, Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+
+        if (part == null || part.getLocation() == null) {
+            authorize(table, readRequiredPriv, writeRequiredPriv);
+        } else {
+            authorize(part.getPartitionPath(), readRequiredPriv, writeRequiredPriv);
+        }
+    }
+
+    @Override
+    public void authorize(Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        if (part == null) {
+            return;
+        }
+        authorize(part.getTable(), part, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Partition part, List<String> columns,
+                          Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
+        //columns cannot live in different files, just check for partition level permissions
+        authorize(table, part, readRequiredPriv, writeRequiredPriv);
+    }
+
+    /**
+     * Authorization privileges against a path.
+     * @param path a filesystem path
+     * @param readRequiredPriv a list of privileges needed for inputs.
+     * @param writeRequiredPriv a list of privileges needed for outputs.
+     */
+    public void authorize(Path path, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        try {
+            EnumSet<FsAction> actions = getFsActions(readRequiredPriv, path);
+            actions.addAll(getFsActions(writeRequiredPriv, path));
+            if (actions.isEmpty()) {
+                return;
+            }
+
+            checkPermissions(getConf(), path, actions);
+
+        } catch (AccessControlException ex) {
+            throw new AuthorizationException(ex);
+        } catch (LoginException ex) {
+            throw new AuthorizationException(ex);
+        } catch (IOException ex) {
+            throw new HiveException(ex);
+        }
+    }
+
+    /**
+     * Checks the permissions for the given path and current user on Hadoop FS. If the given path
+     * does not exist, it checks the nearest existing ancestor instead.
+     */
+    protected static void checkPermissions(final Configuration conf, final Path path,
+                                           final EnumSet<FsAction> actions) throws IOException, LoginException {
+
+        if (path == null) {
+            throw new IllegalArgumentException("path is null");
+        }
+
+        HadoopShims shims = ShimLoader.getHadoopShims();
+        final UserGroupInformation ugi;
+        if (conf.get(PROXY_USER_NAME) != null) {
+            ugi = UserGroupInformation.createRemoteUser(conf.get(PROXY_USER_NAME));
+        } else {
+            ugi = shims.getUGIForConf(conf);
+        }
+        final String user = shims.getShortUserName(ugi);
+
+        final FileSystem fs = path.getFileSystem(conf);
+
+        if (fs.exists(path)) {
+            checkPermissions(fs, path, actions, user, ugi.getGroupNames());
+        } else if (path.getParent() != null) {
+            // find the nearest ancestor that exists and check its permissions
+            Path par = path.getParent();
+            while (par != null) {
+                if (fs.exists(par)) {
+                    break;
+                }
+                par = par.getParent();
+            }
+
+            checkPermissions(fs, par, actions, user, ugi.getGroupNames());
+        }
+    }
+
+    /**
+     * Checks the permissions for the given path and current user on Hadoop FS. If the given path
+     * does not exist, it returns.
+     */
+    @SuppressWarnings("deprecation")
+    protected static void checkPermissions(final FileSystem fs, final Path path,
+                                           final EnumSet<FsAction> actions, String user, String[] groups) throws IOException,
+        AccessControlException {
+
+        final FileStatus stat;
+
+        try {
+            stat = fs.getFileStatus(path);
+        } catch (FileNotFoundException fnfe) {
+            // File named by path doesn't exist; nothing to validate.
+            return;
+        } catch (org.apache.hadoop.fs.permission.AccessControlException ace) {
+            // Older Hadoop versions throw this @deprecated exception.
+            throw new AccessControlException(ace.getMessage());
+        }
+
+        final FsPermission dirPerms = stat.getPermission();
+        final String grp = stat.getGroup();
+
+        for (FsAction action : actions) {
+            if (user.equals(stat.getOwner())) {
+                if (dirPerms.getUserAction().implies(action)) {
+                    continue;
+                }
+            }
+            if (ArrayUtils.contains(groups, grp)) {
+                if (dirPerms.getGroupAction().implies(action)) {
+                    continue;
+                }
+            }
+            if (dirPerms.getOtherAction().implies(action)) {
+                continue;
+            }
+            throw new AccessControlException("action " + action + " not permitted on path "
+                + path + " for user " + user);
+        }
+    }
 }
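
For reference, the owner/group/other check that checkPermissions applies above can be exercised on its own against any HDFS path. The sketch below is not HCatalog API: the class name and warehouse path are illustrative, it relies only on plain permission bits, and it simply mirrors the implication test performed in the loop above.

    // Standalone illustration of the permission test used by checkPermissions;
    // names and the path below are placeholders, not HCatalog code.
    import java.io.IOException;
    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.security.UserGroupInformation;

    public class FsActionCheckSketch {

        /** True if user/groups may perform 'action' on 'path' per plain HDFS permission bits. */
        public static boolean allows(FileSystem fs, Path path, FsAction action,
                                     String user, String[] groups) throws IOException {
            FileStatus stat = fs.getFileStatus(path);
            FsPermission perms = stat.getPermission();
            if (user.equals(stat.getOwner()) && perms.getUserAction().implies(action)) {
                return true;                                   // owner bits allow it
            }
            if (Arrays.asList(groups).contains(stat.getGroup())
                && perms.getGroupAction().implies(action)) {
                return true;                                   // group bits allow it
            }
            return perms.getOtherAction().implies(action);     // fall back to other bits
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            Path warehouse = new Path("/user/hive/warehouse");   // placeholder path
            System.out.println(allows(fs, warehouse, FsAction.WRITE,
                ugi.getShortUserName(), ugi.getGroupNames()));
        }
    }
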
diff --git a/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java b/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java
index 86ab414..546296c 100644
--- a/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java
+++ b/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java
@@ -42,93 +42,93 @@
  */
 public class StorageDelegationAuthorizationProvider extends HiveAuthorizationProviderBase {
 
-  protected HiveAuthorizationProvider hdfsAuthorizer = new HdfsAuthorizationProvider();
-  
-  protected static Map<String, String> authProviders = new HashMap<String,String>();
-  
-  @Override
-  public void setConf(Configuration conf) {
-    super.setConf(conf);
-    hdfsAuthorizer.setConf(conf);
-  }
-  
-  @Override
-  public void setAuthenticator(HiveAuthenticationProvider authenticator) {
-    super.setAuthenticator(authenticator);
-    hdfsAuthorizer.setAuthenticator(authenticator);
-  }
-  
-  static {
-    registerAuthProvider("org.apache.hadoop.hive.hbase.HBaseStorageHandler",
-        "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
-    registerAuthProvider("org.apache.hcatalog.hbase.HBaseHCatStorageHandler", 
-        "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
-  }
-  
-  //workaround until Hive adds StorageHandler.getAuthorizationProvider(). Remove these parts afterwards
-  public static void registerAuthProvider(String storageHandlerClass, 
-      String authProviderClass) {
-    authProviders.put(storageHandlerClass, authProviderClass);
-  }
-  
-  /** Returns the StorageHandler of the Table obtained from the HCatStorageHandler */
-  protected HiveAuthorizationProvider getDelegate(Table table) throws HiveException {
-    HiveStorageHandler handler =  table.getStorageHandler();
-    
-    if (handler != null) {
-      if (handler instanceof HCatStorageHandler) {
-       return ((HCatStorageHandler) handler).getAuthorizationProvider();
-      } else {
-        String authProviderClass = authProviders.get(handler.getClass().getCanonicalName());
-        
-        if (authProviderClass != null) {
-          try {
-            ReflectionUtils.newInstance(getConf().getClassByName(authProviderClass), getConf());
-          } catch (ClassNotFoundException ex) {
-            throw new HiveException("Cannot instantiate delegation AuthotizationProvider");
-          }
-        }
-        
-        //else we do not have anything to delegate to
-        throw new HiveException(String.format("Storage Handler for table:%s is not an instance " +
-            "of HCatStorageHandler", table.getTableName()));
-      }
-    } else {
-      //return an authorizer for HDFS
-      return hdfsAuthorizer;
+    protected HiveAuthorizationProvider hdfsAuthorizer = new HdfsAuthorizationProvider();
+
+    protected static Map<String, String> authProviders = new HashMap<String, String>();
+
+    @Override
+    public void setConf(Configuration conf) {
+        super.setConf(conf);
+        hdfsAuthorizer.setConf(conf);
     }
-  }
-  
-  @Override
-  public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    //global authorizations against warehouse hdfs directory
-    hdfsAuthorizer.authorize(readRequiredPriv, writeRequiredPriv);
-  }
 
-  @Override
-  public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    //db's are tied to a hdfs location
-    hdfsAuthorizer.authorize(db, readRequiredPriv, writeRequiredPriv);
-  }
+    @Override
+    public void setAuthenticator(HiveAuthenticationProvider authenticator) {
+        super.setAuthenticator(authenticator);
+        hdfsAuthorizer.setAuthenticator(authenticator);
+    }
 
-  @Override
-  public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    getDelegate(table).authorize(table, readRequiredPriv, writeRequiredPriv);
-  }
+    static {
+        registerAuthProvider("org.apache.hadoop.hive.hbase.HBaseStorageHandler",
+            "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
+        registerAuthProvider("org.apache.hcatalog.hbase.HBaseHCatStorageHandler",
+            "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
+    }
 
-  @Override
-  public void authorize(Partition part, Privilege[] readRequiredPriv, 
-      Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException {
-    getDelegate(part.getTable()).authorize(part, readRequiredPriv, writeRequiredPriv);
-  }
+    //workaround until Hive adds StorageHandler.getAuthorizationProvider(). Remove these parts afterwards
+    public static void registerAuthProvider(String storageHandlerClass,
+                                            String authProviderClass) {
+        authProviders.put(storageHandlerClass, authProviderClass);
+    }
 
-  @Override
-  public void authorize(Table table, Partition part, List<String> columns,
-      Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
-      AuthorizationException {
-    getDelegate(table).authorize(table, part, columns, readRequiredPriv, writeRequiredPriv);
-  }
+    /** Returns the StorageHandler of the Table obtained from the HCatStorageHandler */
+    protected HiveAuthorizationProvider getDelegate(Table table) throws HiveException {
+        HiveStorageHandler handler = table.getStorageHandler();
+
+        if (handler != null) {
+            if (handler instanceof HCatStorageHandler) {
+                return ((HCatStorageHandler) handler).getAuthorizationProvider();
+            } else {
+                String authProviderClass = authProviders.get(handler.getClass().getCanonicalName());
+
+                if (authProviderClass != null) {
+                    try {
+                        ReflectionUtils.newInstance(getConf().getClassByName(authProviderClass), getConf());
+                    } catch (ClassNotFoundException ex) {
+                        throw new HiveException("Cannot instantiate delegation AuthotizationProvider");
+                    }
+                }
+
+                //else we do not have anything to delegate to
+                throw new HiveException(String.format("Storage Handler for table:%s is not an instance " +
+                    "of HCatStorageHandler", table.getTableName()));
+            }
+        } else {
+            //return an authorizer for HDFS
+            return hdfsAuthorizer;
+        }
+    }
+
+    @Override
+    public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        //global authorizations against warehouse hdfs directory
+        hdfsAuthorizer.authorize(readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        //db's are tied to a hdfs location
+        hdfsAuthorizer.authorize(db, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        getDelegate(table).authorize(table, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Partition part, Privilege[] readRequiredPriv,
+                          Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException {
+        getDelegate(part.getTable()).authorize(part, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Partition part, List<String> columns,
+                          Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
+        getDelegate(table).authorize(table, part, columns, readRequiredPriv, writeRequiredPriv);
+    }
 }
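
As a usage note, the registerAuthProvider workaround above lets additional storage handlers plug in an authorization provider by class name before tables backed by them are authorized. A minimal sketch; the handler and provider class names are hypothetical:

    import org.apache.hcatalog.security.StorageDelegationAuthorizationProvider;

    public class RegisterCustomAuthProviderSketch {
        public static void main(String[] args) {
            // Map a storage handler class to the provider that should authorize
            // tables backed by it; tables without a handler still fall through
            // to the HDFS delegate.
            StorageDelegationAuthorizationProvider.registerAuthProvider(
                "com.example.MyStorageHandler",          // hypothetical handler class
                "com.example.MyAuthorizationProvider");  // hypothetical provider class
        }
    }
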
diff --git a/src/java/org/apache/hcatalog/shims/HCatHadoopShims.java b/src/java/org/apache/hcatalog/shims/HCatHadoopShims.java
index 2be4509..ee693b4 100644
--- a/src/java/org/apache/hcatalog/shims/HCatHadoopShims.java
+++ b/src/java/org/apache/hcatalog/shims/HCatHadoopShims.java
@@ -39,53 +39,55 @@
  **/
 public interface HCatHadoopShims {
 
-  enum PropertyName { CACHE_ARCHIVES, CACHE_FILES, CACHE_SYMLINK };
+    enum PropertyName {CACHE_ARCHIVES, CACHE_FILES, CACHE_SYMLINK};
 
-  public static abstract class Instance {
-    static HCatHadoopShims instance = selectShim();
 
-    public static HCatHadoopShims get() {
-      return instance;
+    public static abstract class Instance {
+        static HCatHadoopShims instance = selectShim();
+
+        public static HCatHadoopShims get() {
+            return instance;
+        }
+
+        private static HCatHadoopShims selectShim() {
+            // piggyback on Hive's detection logic
+            String major = ShimLoader.getMajorVersion();
+            String shimFQN = "org.apache.hcatalog.shims.HCatHadoopShims20S";
+            if (major.startsWith("0.23")) {
+                shimFQN = "org.apache.hcatalog.shims.HCatHadoopShims23";
+            }
+            try {
+                Class<? extends HCatHadoopShims> clasz = Class.forName(shimFQN)
+                    .asSubclass(HCatHadoopShims.class);
+                return clasz.newInstance();
+            } catch (Exception e) {
+                throw new RuntimeException("Failed to instantiate: " + shimFQN, e);
+            }
+        }
     }
 
-    private static HCatHadoopShims selectShim() {
-      // piggyback on Hive's detection logic
-      String major = ShimLoader.getMajorVersion();
-      String shimFQN = "org.apache.hcatalog.shims.HCatHadoopShims20S";
-      if (major.startsWith("0.23")) {
-        shimFQN = "org.apache.hcatalog.shims.HCatHadoopShims23";
-      }
-      try {
-        Class<? extends HCatHadoopShims> clasz = Class.forName(shimFQN)
-            .asSubclass(HCatHadoopShims.class);
-        return clasz.newInstance();
-      } catch (Exception e) {
-        throw new RuntimeException("Failed to instantiate: " + shimFQN, e);
-      }
-    }
-  }
+    public TaskID createTaskID();
 
-  public TaskID createTaskID();
+    public TaskAttemptID createTaskAttemptID();
 
-  public TaskAttemptID createTaskAttemptID();
+    public org.apache.hadoop.mapreduce.TaskAttemptContext createTaskAttemptContext(Configuration conf,
+                                                                                   TaskAttemptID taskId);
 
-  public org.apache.hadoop.mapreduce.TaskAttemptContext createTaskAttemptContext(Configuration conf,
-          TaskAttemptID taskId);
+    public org.apache.hadoop.mapred.TaskAttemptContext createTaskAttemptContext(JobConf conf,
+                                                                                org.apache.hadoop.mapred.TaskAttemptID taskId, Progressable progressable);
 
-  public org.apache.hadoop.mapred.TaskAttemptContext createTaskAttemptContext(JobConf conf,
-          org.apache.hadoop.mapred.TaskAttemptID taskId, Progressable progressable);
+    public JobContext createJobContext(Configuration conf, JobID jobId);
 
-  public JobContext createJobContext(Configuration conf, JobID jobId);
+    public org.apache.hadoop.mapred.JobContext createJobContext(JobConf conf, JobID jobId, Progressable progressable);
 
-  public org.apache.hadoop.mapred.JobContext createJobContext(JobConf conf, JobID jobId, Progressable progressable);
+    public void commitJob(OutputFormat outputFormat, ResourceSchema schema,
+                          String arg1, Job job) throws IOException;
 
-  public void commitJob(OutputFormat outputFormat, ResourceSchema schema,
-          String arg1, Job job) throws IOException;
+    public void abortJob(OutputFormat outputFormat, Job job) throws IOException;
 
-  public void abortJob(OutputFormat outputFormat, Job job) throws IOException;
+    /* Referring to job tracker in 0.20 and resource manager in 0.23 */
+    public InetSocketAddress getResourceManagerAddress(Configuration conf);
 
-  /* Referring to job tracker in 0.20 and resource manager in 0.23 */
-  public InetSocketAddress getResourceManagerAddress(Configuration conf);
-
-  public String getPropertyName(PropertyName name);
+    public String getPropertyName(PropertyName name);
 }
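
As a usage sketch, callers obtain the version-appropriate shim through the Instance holder declared above; only methods declared by the interface are used, and the printed message is illustrative:

    import org.apache.hcatalog.shims.HCatHadoopShims;

    public class ShimUsageSketch {
        public static void main(String[] args) {
            // Instance.get() picks HCatHadoopShims20S or HCatHadoopShims23 based on
            // the Hadoop version reported by Hive's ShimLoader.
            HCatHadoopShims shims = HCatHadoopShims.Instance.get();

            // Resolve a version-specific configuration key through the shim.
            String cacheFiles = shims.getPropertyName(HCatHadoopShims.PropertyName.CACHE_FILES);
            System.out.println("distributed cache files property: " + cacheFiles);
        }
    }
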
diff --git a/src/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java b/src/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java
index 33bdee0..6c31c10 100644
--- a/src/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java
+++ b/src/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java
@@ -35,109 +35,109 @@
  * dummy authorization functionality for other classes to extend and override.
  */
 class DummyHCatAuthProvider implements HiveAuthorizationProvider {
-    
+
     @Override
     public Configuration getConf() {
         return null;
     }
-    
+
     @Override
     public void setConf(Configuration conf) {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #init(org.apache.hadoop.conf.Configuration)
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #init(org.apache.hadoop.conf.Configuration)
+    */
     @Override
     public void init(Configuration conf) throws HiveException {
     }
-    
+
     @Override
     public HiveAuthenticationProvider getAuthenticator() {
         return null;
     }
-    
+
     @Override
     public void setAuthenticator(HiveAuthenticationProvider authenticator) {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.metastore.api.Database,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.metastore.api.Database,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Database db, Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Table table, Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.metadata.Partition,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Partition,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Partition part, Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
-     * org.apache.hadoop.hive.ql.metadata.Partition, java.util.List,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
+    * org.apache.hadoop.hive.ql.metadata.Partition, java.util.List,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Table table, Partition part, List<String> columns,
-            Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-            throws HiveException, AuthorizationException {
+                          Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
     }
-    
+
 }
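
Because DummyHCatAuthProvider is a package-private no-op base, a subclass overrides only the checks it cares about. A minimal sketch, assuming the subclass lives in the same package; the class name and its logging body are hypothetical:

    package org.apache.hcatalog.storagehandler;

    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.security.authorization.Privilege;

    class LoggingHCatAuthProvider extends DummyHCatAuthProvider {

        @Override
        public void authorize(Table table, Privilege[] readRequiredPriv,
                              Privilege[] writeRequiredPriv) {
            // Override only the table-level hook; every other check stays a no-op.
            System.out.println("authorize requested for table " + table.getTableName());
        }
    }
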
diff --git a/src/test/e2e/hcatalog/build.xml b/src/test/e2e/hcatalog/build.xml
index 6ca71ec..f2fc1e1 100644
--- a/src/test/e2e/hcatalog/build.xml
+++ b/src/test/e2e/hcatalog/build.xml
@@ -17,314 +17,314 @@
 
 <project name="TestHarnessHCatTests" default="test">
 
-  <property name="hcat.jar"
-    value="${hcat.dir}/share/hcatalog/hcatalog-${hcatalog.version}.jar"/>
+    <property name="hcat.jar"
+              value="${hcat.dir}/share/hcatalog/hcatalog-${hcatalog.version}.jar"/>
 
-  <!-- Separate property name for udfs' build.xml -->
-  <property name="hcat.jarfile" value="${hcat.jar}"/>
-  <property name="udf.dir" value="${basedir}/udfs"/>
-  <property name="udf.java.dir" value="${udf.dir}/java"/>
-  <property name="udf.jar" value="${udf.java.dir}/testudf.jar"/>
-  <property name="params.dir" value="${basedir}/paramfiles"/>
-  <property name="lib.dir" value="${basedir}/lib"/>
-  <property name="rctool.java.dir" value="${basedir}/tools/generate/java"/>
+    <!-- Separate property name for udfs' build.xml -->
+    <property name="hcat.jarfile" value="${hcat.jar}"/>
+    <property name="udf.dir" value="${basedir}/udfs"/>
+    <property name="udf.java.dir" value="${udf.dir}/java"/>
+    <property name="udf.jar" value="${udf.java.dir}/testudf.jar"/>
+    <property name="params.dir" value="${basedir}/paramfiles"/>
+    <property name="lib.dir" value="${basedir}/lib"/>
+    <property name="rctool.java.dir" value="${basedir}/tools/generate/java"/>
 
-  <property name="tar.name" value="${basedir}/hcattests.tar"/>
-  <property name="tar.dir" value="${basedir}/tar"/>
-  <property name="test.src" value="${basedir}/tests"/>
-  <property name="driver.src" value="${basedir}/drivers"/>
-  <property name="deployer.src" value="${basedir}/deployers"/>
-  <property name="conf.src" value="${basedir}/conf"/>
-  <property name="tool.src" value="${basedir}/tools"/>
-  <property name="data.dir" value="${basedir}/data"/>
+    <property name="tar.name" value="${basedir}/hcattests.tar"/>
+    <property name="tar.dir" value="${basedir}/tar"/>
+    <property name="test.src" value="${basedir}/tests"/>
+    <property name="driver.src" value="${basedir}/drivers"/>
+    <property name="deployer.src" value="${basedir}/deployers"/>
+    <property name="conf.src" value="${basedir}/conf"/>
+    <property name="tool.src" value="${basedir}/tools"/>
+    <property name="data.dir" value="${basedir}/data"/>
 
-  <property name="harness.dir" value="${basedir}/../harness"/>
-  <property name="harness.tar" value="${harness.dir}/harness.tar"/>
-  <property name="test.location" value="${basedir}/testdist"/>
-  <property name="benchmark.location" value="${test.location}/benchmarks"/>
-  <!--<property name="hadoop.core.path" value="${harness.hadoop.home}"/>-->
-  <property name="hadoop.core.path" value="${hadoop.home}"/>
-  <!-- Override on command line to use rpm.conf -->
-  <property name="harness.conf" value="${test.location}/conf/default.conf"/>
-  <!-- Default value for output directory -->
-  <property name="harness.PH_LOCAL" value="out"/>
+    <property name="harness.dir" value="${basedir}/../harness"/>
+    <property name="harness.tar" value="${harness.dir}/harness.tar"/>
+    <property name="test.location" value="${basedir}/testdist"/>
+    <property name="benchmark.location" value="${test.location}/benchmarks"/>
+    <!--<property name="hadoop.core.path" value="${harness.hadoop.home}"/>-->
+    <property name="hadoop.core.path" value="${hadoop.home}"/>
+    <!-- Override on command line to use rpm.conf -->
+    <property name="harness.conf" value="${test.location}/conf/default.conf"/>
+    <!-- Default value for output directory -->
+    <property name="harness.PH_LOCAL" value="out"/>
 
-  <property name="hadoopversion" value="20" />
+    <property name="hadoopversion" value="20"/>
 
-  <condition property="isHadoop23">
-    <equals arg1="${hadoopversion}" arg2="23"/>
-  </condition>
+    <condition property="isHadoop23">
+        <equals arg1="${hadoopversion}" arg2="23"/>
+    </condition>
 
-  <!-- Build the UDFs -->
-  <target name="udfs" >
-    <ant dir="${udf.java.dir}"/>
-  </target>
+    <!-- Build the UDFs -->
+    <target name="udfs">
+        <ant dir="${udf.java.dir}"/>
+    </target>
 
-  <path id="hadoop.core.jar.location">
-    <fileset dir="${hadoop.core.path}">
-      <include name="hadoop-core-*.jar" unless="isHadoop23"/>
-      <include name="**/hadoop-common-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-auth-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-hdfs-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-mapreduce-client-core-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-yarn-api-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-yarn-common-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-annotations-*.jar" if="isHadoop23"/>
-    </fileset>
-  </path>
-
-  <path id="hive.serde.jar.location">
-    <!-- <fileset dir="${hive.dir}/build/serde"> -->
-    <fileset dir="${hive.home}/lib">
-      <include name="hive-serde-*.jar"/>
-    </fileset>
-  </path>
-
-  <path id="hive.ql.jar.location">
-    <!--<fileset dir="${hive.dir}/build/ql"> -->
-    <fileset dir="${hive.home}/lib">
-      <include name="hive-exec-*.jar"/>
-    </fileset>
-  </path>
-
-  <!-- Build the RCfile data generator -->
-  <target name="rctool" depends="property-check">
-    <ant dir="${rctool.java.dir}">
-      <property name="hive.serde.jarfile" refid="hive.serde.jar.location"/>
-      <property name="hive.ql.jarfile" refid="hive.ql.jar.location"/>
-      <property name="hadoop.core.jarfile" refid="hadoop.core.jar.location"/>
-    </ant>
-  </target>
-
-  <!-- Build an archive to use in the tests -->
-  <target name="tar" description="Create tar file with hcat modules">
-    <mkdir dir="${tar.dir}"/>
-    <mkdir dir="${tar.dir}/tests"/>
-    <mkdir dir="${tar.dir}/drivers"/>
-    <mkdir dir="${tar.dir}/deployers"/>
-    <mkdir dir="${tar.dir}/conf"/>
-    <mkdir dir="${tar.dir}/libexec"/>
-    <mkdir dir="${tar.dir}/libexec/PigTest"/>
-    <mkdir dir="${tar.dir}/libexec/PigTest/test"/>
-    <mkdir dir="${tar.dir}/libexec/PigTest/generate"/>
-    <mkdir dir="${tar.dir}/lib"/>
-    <mkdir dir="${tar.dir}/lib/java"/>
-    <mkdir dir="${tar.dir}/paramfiles"/>
-
-    <copy todir="${tar.dir}/tests">
-        <fileset dir="${test.src}">
+    <path id="hadoop.core.jar.location">
+        <fileset dir="${hadoop.core.path}">
+            <include name="hadoop-core-*.jar" unless="isHadoop23"/>
+            <include name="**/hadoop-common-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-auth-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-hdfs-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-mapreduce-client-core-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-yarn-api-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-yarn-common-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-annotations-*.jar" if="isHadoop23"/>
         </fileset>
-    </copy>
-    
-    <copy todir="${tar.dir}/data">
-        <fileset dir="${data.dir}">
+    </path>
+
+    <path id="hive.serde.jar.location">
+        <!-- <fileset dir="${hive.dir}/build/serde"> -->
+        <fileset dir="${hive.home}/lib">
+            <include name="hive-serde-*.jar"/>
         </fileset>
-    </copy>
+    </path>
+
+    <path id="hive.ql.jar.location">
+        <!--<fileset dir="${hive.dir}/build/ql"> -->
+        <fileset dir="${hive.home}/lib">
+            <include name="hive-exec-*.jar"/>
+        </fileset>
+    </path>
+
+    <!-- Build the RCfile data generator -->
+    <target name="rctool" depends="property-check">
+        <ant dir="${rctool.java.dir}">
+            <property name="hive.serde.jarfile" refid="hive.serde.jar.location"/>
+            <property name="hive.ql.jarfile" refid="hive.ql.jar.location"/>
+            <property name="hadoop.core.jarfile" refid="hadoop.core.jar.location"/>
+        </ant>
+    </target>
+
+    <!-- Build an archive to use in the tests -->
+    <target name="tar" description="Create tar file with hcat modules">
+        <mkdir dir="${tar.dir}"/>
+        <mkdir dir="${tar.dir}/tests"/>
+        <mkdir dir="${tar.dir}/drivers"/>
+        <mkdir dir="${tar.dir}/deployers"/>
+        <mkdir dir="${tar.dir}/conf"/>
+        <mkdir dir="${tar.dir}/libexec"/>
+        <mkdir dir="${tar.dir}/libexec/PigTest"/>
+        <mkdir dir="${tar.dir}/libexec/PigTest/test"/>
+        <mkdir dir="${tar.dir}/libexec/PigTest/generate"/>
+        <mkdir dir="${tar.dir}/lib"/>
+        <mkdir dir="${tar.dir}/lib/java"/>
+        <mkdir dir="${tar.dir}/paramfiles"/>
+
+        <copy todir="${tar.dir}/tests">
+            <fileset dir="${test.src}">
+            </fileset>
+        </copy>
+
+        <copy todir="${tar.dir}/data">
+            <fileset dir="${data.dir}">
+            </fileset>
+        </copy>
 
 
-    <copy todir="${tar.dir}">
-      <fileset dir="${driver.src}">
-        <exclude name="TestDriverScript.pm"/>
-      </fileset>
-      <fileset dir="${deployer.src}"/>
-    </copy>
+        <copy todir="${tar.dir}">
+            <fileset dir="${driver.src}">
+                <exclude name="TestDriverScript.pm"/>
+            </fileset>
+            <fileset dir="${deployer.src}"/>
+        </copy>
 
 
-    <copy todir="${tar.dir}/conf">
-      <fileset dir="${conf.src}"/>
-    </copy>
+        <copy todir="${tar.dir}/conf">
+            <fileset dir="${conf.src}"/>
+        </copy>
 
-    <copy todir="${tar.dir}/libexec/HCatTest">
-      <fileset dir="${tool.src}/test"/>
-      <fileset dir="${tool.src}/generate"/>
-      <fileset dir="${tool.src}/install"/>
-    </copy>
+        <copy todir="${tar.dir}/libexec/HCatTest">
+            <fileset dir="${tool.src}/test"/>
+            <fileset dir="${tool.src}/generate"/>
+            <fileset dir="${tool.src}/install"/>
+        </copy>
 
-    <copy todir="${tar.dir}/lib/java">
-      <fileset file="${udf.jar}"/>
-    </copy>
+        <copy todir="${tar.dir}/lib/java">
+            <fileset file="${udf.jar}"/>
+        </copy>
 
-    <copy todir="${tar.dir}/paramfiles">
-      <fileset file="${params.dir}/params_3"/>
-    </copy>
+        <copy todir="${tar.dir}/paramfiles">
+            <fileset file="${params.dir}/params_3"/>
+        </copy>
 
-    <tar destfile="${tar.name}" basedir="${tar.dir}"/>
-  </target>
+        <tar destfile="${tar.name}" basedir="${tar.dir}"/>
+    </target>
 
-  <!-- Get the tarball for the harness -->
-  <target name="build-harness">
-    <ant dir="${harness.dir}" inheritAll="false"/>
-  </target>
+    <!-- Get the tarball for the harness -->
+    <target name="build-harness">
+        <ant dir="${harness.dir}" inheritAll="false"/>
+    </target>
 
-  <!-- Check that the necessary properties are setup -->
-  <target name="property-check">
-    <!--
-    <fail message="Please set the property hadoop.home to the location Hadoop is installed "
-      unless="hadoop.home"/>
-      -->
-    <fail message="Please set the property hadoop.home to the location Hadoop is installed ">
-      <condition>
-        <and>
-          <not>
-            <isset property="hadoop.home"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
-    <fail message="Please set the property harness.cluster.conf to the location Hadoop conf is installed ">
-      <condition>
-        <and>
-          <not>
-            <isset property="harness.cluster.conf"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
-    <fail message="Please set the property hive.home to the location Hive is installed ">
-      <condition>
-        <and>
-          <not>
-            <isset property="hive.home"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
-    <fail message="Please set the property hcat.home to the location HCatalog is installed ">
-      <condition>
-        <and>
-          <not>
-            <isset property="hcat.home"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
-    <fail message="Please set the property pig.home to the location Pig is installed ">
-    <condition>
-        <and>
-          <not>
-            <isset property="pig.home"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
+    <!-- Check that the necessary properties are setup -->
+    <target name="property-check">
+        <!--
+      <fail message="Please set the property hadoop.home to the location Hadoop is installed "
+        unless="hadoop.home"/>
+        -->
+        <fail message="Please set the property hadoop.home to the location Hadoop is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="hadoop.home"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
+        <fail message="Please set the property harness.cluster.conf to the location Hadoop conf is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="harness.cluster.conf"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
+        <fail message="Please set the property hive.home to the location Hive is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="hive.home"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
+        <fail message="Please set the property hcat.home to the location HCatalog is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="hcat.home"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
+        <fail message="Please set the property pig.home to the location Pig is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="pig.home"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
 
-    <fail message="Please set the property hbase.home to the location HBase is installed ">
-      <condition>
-        <and>
-          <not>
-            <isset property="hbase.home"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
-  </target>
+        <fail message="Please set the property hbase.home to the location HBase is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="hbase.home"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
+    </target>
 
-  <!-- Prep the test area -->
-  <target name="init-test" depends="build-harness, tar">
-    <mkdir dir="${test.location}"/>
-    <mkdir dir="${benchmark.location}"/>
+    <!-- Prep the test area -->
+    <target name="init-test" depends="build-harness, tar">
+        <mkdir dir="${test.location}"/>
+        <mkdir dir="${benchmark.location}"/>
 
-    <untar src="${tar.name}" dest="${test.location}"/>
-    <untar src="${harness.tar}" dest="${test.location}"/>
+        <untar src="${tar.name}" dest="${test.location}"/>
+        <untar src="${harness.tar}" dest="${test.location}"/>
 
-    <chmod perm="ugo+x" type="file">
-      <fileset dir="${test.location}/libexec" />
-      <fileset file="${test.location}/test_harness.pl"/>
-    </chmod>
+        <chmod perm="ugo+x" type="file">
+            <fileset dir="${test.location}/libexec"/>
+            <fileset file="${test.location}/test_harness.pl"/>
+        </chmod>
 
-  </target>
+    </target>
 
-  <target name="test" depends="property-check, udfs, tar, init-test">
+    <target name="test" depends="property-check, udfs, tar, init-test">
 
-    <!-- If they have not specified tests to run then null it out -->
-     <property name="tests.to.run" value=""/> 
-    <echo />
-    <exec executable="./test_harness.pl" dir="${test.location}" failonerror="true">
-      <env key="HARNESS_ROOT" value="."/>
-      <env key="PH_LOCAL" value="${harness.PH_LOCAL}"/>
-      <env key="HADOOP_HOME" value="${hadoop.home}"/>
-      <env key="HADOOP_CONF_DIR" value="${harness.cluster.conf}"/>
-      <env key="HIVE_HOME" value="${hive.home}"/>
-      <env key="HCAT_HOME" value="${hcat.home}"/>
-      <env key="PIG_HOME" value="${pig.home}"/>
-      <env key="HBASE_HOME" value="${hbase.home}"/>
-      <arg line="-conf ${harness.conf}"/>
-      <arg line="${tests.to.run}"/>
-      <arg value="${test.location}/tests/pig.conf"/>
-      <arg value="${test.location}/tests/hive.conf"/>
-      <arg value="${test.location}/tests/hcat.conf"/>
-      <arg value="${test.location}/tests/hadoop.conf"/>
-    </exec>
-  </target>
+        <!-- If they have not specified tests to run then null it out -->
+        <property name="tests.to.run" value=""/>
+        <echo/>
+        <exec executable="./test_harness.pl" dir="${test.location}" failonerror="true">
+            <env key="HARNESS_ROOT" value="."/>
+            <env key="PH_LOCAL" value="${harness.PH_LOCAL}"/>
+            <env key="HADOOP_HOME" value="${hadoop.home}"/>
+            <env key="HADOOP_CONF_DIR" value="${harness.cluster.conf}"/>
+            <env key="HIVE_HOME" value="${hive.home}"/>
+            <env key="HCAT_HOME" value="${hcat.home}"/>
+            <env key="PIG_HOME" value="${pig.home}"/>
+            <env key="HBASE_HOME" value="${hbase.home}"/>
+            <arg line="-conf ${harness.conf}"/>
+            <arg line="${tests.to.run}"/>
+            <arg value="${test.location}/tests/pig.conf"/>
+            <arg value="${test.location}/tests/hive.conf"/>
+            <arg value="${test.location}/tests/hcat.conf"/>
+            <arg value="${test.location}/tests/hadoop.conf"/>
+        </exec>
+    </target>
 
-  <target name="init-deploy" depends="rctool">
-     <!-- For now default to the existing cluster deployer, since 
+    <target name="init-deploy" depends="rctool">
+        <!-- For now default to the existing cluster deployer, since
     it's all there is.  Once the local deployer is available that
     should be the default. -->
-   <property name="deploy.conf"
-        value="${test.location}/conf/existing_deployer.conf"/>
-  </target>
+        <property name="deploy.conf"
+                  value="${test.location}/conf/existing_deployer.conf"/>
+    </target>
 
-  <target name="deploy-base" depends="property-check, tar, init-test, init-deploy">
-    <exec executable="./test_harness.pl" dir="${test.location}"
-      failonerror="true">
-      <env key="HARNESS_ROOT" value="."/>
-      <env key="PH_LOCAL" value="${harness.PH_LOCAL}"/>
-      <env key="HADOOP_HOME" value="${hadoop.home}"/>
-      <env key="HIVE_HOME" value="${hive.home}"/>
-      <env key="HCAT_HOME" value="${hcat.home}"/>
-      <env key="PIG_HOME" value="${pig.home}"/>
-      <env key="HBASE_HOME" value="${hbase.home}"/>
-      <arg line="-conf ${harness.conf}"/>
-      <arg value="-deploycfg"/>
-      <arg value="${deploy.conf}"/>
-      <arg value="${deploy.opt}"/>
-      <!-- Give a bogus test so it just does the deployment -->
-      <arg value="-t"/>
-      <arg value="NoSuchTest"/>
-    </exec>
-  </target>
+    <target name="deploy-base" depends="property-check, tar, init-test, init-deploy">
+        <exec executable="./test_harness.pl" dir="${test.location}"
+              failonerror="true">
+            <env key="HARNESS_ROOT" value="."/>
+            <env key="PH_LOCAL" value="${harness.PH_LOCAL}"/>
+            <env key="HADOOP_HOME" value="${hadoop.home}"/>
+            <env key="HIVE_HOME" value="${hive.home}"/>
+            <env key="HCAT_HOME" value="${hcat.home}"/>
+            <env key="PIG_HOME" value="${pig.home}"/>
+            <env key="HBASE_HOME" value="${hbase.home}"/>
+            <arg line="-conf ${harness.conf}"/>
+            <arg value="-deploycfg"/>
+            <arg value="${deploy.conf}"/>
+            <arg value="${deploy.opt}"/>
+            <!-- Give a bogus test so it just does the deployment -->
+            <arg value="-t"/>
+            <arg value="NoSuchTest"/>
+        </exec>
+    </target>
 
-  <target name="deploy">
-    <antcall target="deploy-base">
-      <param name="deploy.opt" value="-deploy"/>
-    </antcall>
-  </target>
+    <target name="deploy">
+        <antcall target="deploy-base">
+            <param name="deploy.opt" value="-deploy"/>
+        </antcall>
+    </target>
 
-  <target name="undeploy">
-    <antcall target="deploy-base">
-      <param name="deploy.opt" value="-undeploy"/>
-    </antcall>
-  </target>
+    <target name="undeploy">
+        <antcall target="deploy-base">
+            <param name="deploy.opt" value="-undeploy"/>
+        </antcall>
+    </target>
 
-  <target name="deploy-test" depends="deploy, test"/>
+    <target name="deploy-test" depends="deploy, test"/>
 
-  <target name="deploy-test-undeploy" depends="deploy, test, undeploy"/>
+    <target name="deploy-test-undeploy" depends="deploy, test, undeploy"/>
 
-  <target name="clean">
-    <delete dir="${test.location}"/>
-    <delete file="${tar.name}"/>
-    <delete dir="${tar.dir}"/>
-    <ant dir="${udf.java.dir}" target="clean"/>
-  </target>
+    <target name="clean">
+        <delete dir="${test.location}"/>
+        <delete file="${tar.name}"/>
+        <delete dir="${tar.dir}"/>
+        <ant dir="${udf.java.dir}" target="clean"/>
+    </target>
 
 </project>
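
For reference, the property-check target above means a typical e2e run supplies the install locations on the ant command line; the paths below are placeholders, and pointing harness.conf at rpm.conf skips these checks entirely:

    ant test -Dhadoop.home=/usr/lib/hadoop \
             -Dharness.cluster.conf=/etc/hadoop/conf \
             -Dhive.home=/usr/lib/hive \
             -Dhcat.home=/usr/lib/hcatalog \
             -Dpig.home=/usr/lib/pig \
             -Dhbase.home=/usr/lib/hbase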
 
diff --git a/src/test/e2e/hcatalog/tools/generate/java/build.xml b/src/test/e2e/hcatalog/tools/generate/java/build.xml
index 2b2d321..11337d8 100644
--- a/src/test/e2e/hcatalog/tools/generate/java/build.xml
+++ b/src/test/e2e/hcatalog/tools/generate/java/build.xml
@@ -15,57 +15,56 @@
 
 <project name="Hive-Data-Generator" default="generator-jar">
 
-    <property name="generator.jarfile" value="hive-gen.jar" />
-    <property name="generator.build.dir" value="${basedir}/build" />
-    <property name="generator.src.dir" value="${basedir}/org" />
+    <property name="generator.jarfile" value="hive-gen.jar"/>
+    <property name="generator.build.dir" value="${basedir}/build"/>
+    <property name="generator.src.dir" value="${basedir}/org"/>
 
 
     <path id="generator-classpath">
-        <fileset file="${hive.serde.jarfile}" />
-        <fileset file="${hive.ql.jarfile}" />
-        <fileset file="${hadoop.core.jarfile}" />
+        <fileset file="${hive.serde.jarfile}"/>
+        <fileset file="${hive.ql.jarfile}"/>
+        <fileset file="${hadoop.core.jarfile}"/>
     </path>
 
     <target name="init">
-        <mkdir dir="${generator.build.dir}" />
+        <mkdir dir="${generator.build.dir}"/>
     </target>
 
     <target name="clean">
-        <delete dir="${generator.build.dir}" />
-        <delete file="${generator.jarfile}" />
+        <delete dir="${generator.build.dir}"/>
+        <delete file="${generator.jarfile}"/>
     </target>
 
     <target name="generator-compile"
             depends="init, serde.jar.check, ql.jar.check, hadoop.jar.check">
         <echo>*** Compiling UDFs ***</echo>
-        <javac srcdir="${generator.src.dir}" destdir="${generator.build.dir}" debug="on" includeantruntime="false" includes="**/*.java">
-            <classpath refid="generator-classpath" />
+        <javac srcdir="${generator.src.dir}" destdir="${generator.build.dir}" debug="on" includeantruntime="false"
+               includes="**/*.java">
+            <classpath refid="generator-classpath"/>
         </javac>
     </target>
 
     <target name="generator-jar" depends="generator-compile">
         <echo>*** Creating UDF jar ***</echo>
         <jar duplicate="preserve" jarfile="${generator.jarfile}">
-	    <fileset dir="build"/>
+            <fileset dir="build"/>
         </jar>
     </target>
 
-	<target name="serde.jar.check" unless="hive.serde.jarfile">
-	 	<fail message="'hive.serde.jarfile' is not defined. 
-		Please pass -Dhive.serde.jarfile=&lt;Hive serde jar to use&gt; to Ant on the command-line." />
-	</target>
+    <target name="serde.jar.check" unless="hive.serde.jarfile">
+        <fail message="'hive.serde.jarfile' is not defined.
+		Please pass -Dhive.serde.jarfile=&lt;Hive serde jar to use&gt; to Ant on the command-line."/>
+    </target>
 
-	<target name="ql.jar.check" unless="hive.ql.jarfile">
-	 	<fail message="'hive.ql.jarfile' is not defined. 
-		Please pass -Dhive.ql.jarfile=&lt;Hive ql jar to use&gt; to Ant on the command-line." />
-	</target>
+    <target name="ql.jar.check" unless="hive.ql.jarfile">
+        <fail message="'hive.ql.jarfile' is not defined.
+		Please pass -Dhive.ql.jarfile=&lt;Hive ql jar to use&gt; to Ant on the command-line."/>
+    </target>
 
-	<target name="hadoop.jar.check" unless="hadoop.core.jarfile">
-	 	<fail message="'hadoop.core.jarfile' is not defined. 
-		Please pass -Dhadoop.core.jarfile=&lt;Hadoop core jar to use&gt; to Ant on the command-line." />
-	</target>
-
-
+    <target name="hadoop.jar.check" unless="hadoop.core.jarfile">
+        <fail message="'hadoop.core.jarfile' is not defined.
+		Please pass -Dhadoop.core.jarfile=&lt;Hadoop core jar to use&gt; to Ant on the command-line."/>
+    </target>
 
 
 </project>
diff --git a/src/test/e2e/hcatalog/tools/generate/java/org/apache/hadoop/hive/tools/generate/RCFileGenerator.java b/src/test/e2e/hcatalog/tools/generate/java/org/apache/hadoop/hive/tools/generate/RCFileGenerator.java
index 1ac5382..a2e8d9e 100644
--- a/src/test/e2e/hcatalog/tools/generate/java/org/apache/hadoop/hive/tools/generate/RCFileGenerator.java
+++ b/src/test/e2e/hcatalog/tools/generate/java/org/apache/hadoop/hive/tools/generate/RCFileGenerator.java
@@ -46,24 +46,24 @@
     private static Random rand;
 
     private static Path getFile(String filename) throws Exception {
-       return new Path(basedir, filename);
+        return new Path(basedir, filename);
     }
 
     private static String[] firstName = {"alice", "bob", "calvin", "david",
-      "ethan", "fred", "gabriella", "holly", "irene", "jessica", "katie",
-      "luke", "mike", "nick", "oscar", "priscilla", "quinn", "rachel",
-      "sarah", "tom", "ulysses", "victor", "wendy", "xavier", "yuri",
-      "zach"};
+        "ethan", "fred", "gabriella", "holly", "irene", "jessica", "katie",
+        "luke", "mike", "nick", "oscar", "priscilla", "quinn", "rachel",
+        "sarah", "tom", "ulysses", "victor", "wendy", "xavier", "yuri",
+        "zach"};
 
     private static String[] lastName = {"allen", "brown", "carson",
-      "davidson", "ellison", "falkner", "garcia", "hernandez", "ichabod",
-      "johnson", "king", "laertes", "miller", "nixon", "ovid", "polk",
-      "quirinius", "robinson", "steinbeck", "thompson", "underhill",
-      "van buren", "white", "xylophone", "young", "zipper"};
+        "davidson", "ellison", "falkner", "garcia", "hernandez", "ichabod",
+        "johnson", "king", "laertes", "miller", "nixon", "ovid", "polk",
+        "quirinius", "robinson", "steinbeck", "thompson", "underhill",
+        "van buren", "white", "xylophone", "young", "zipper"};
 
     private static String randomName() {
         StringBuffer buf =
-            new StringBuffer(firstName[rand.nextInt(firstName.length)]);
+                new StringBuffer(firstName[rand.nextInt(firstName.length)]);
         buf.append(' ');
         buf.append(lastName[rand.nextInt(lastName.length)]);
         return buf.toString();
@@ -115,8 +115,8 @@
     }
 
     private static void usage() {
-        System.err.println("Usage: rcfilegen format number_of_rows " + 
-            "output_file plain_output_file");
+        System.err.println("Usage: rcfilegen format number_of_rows " +
+                "output_file plain_output_file");
         System.err.println("  format one of:  student voter alltypes");
         System.exit(1);
     }
@@ -140,7 +140,7 @@
                                 int numRows,
                                 String output, String plainOutput) throws Exception {
         int numFields = 0;
-        if (format.equals("student")) { 
+        if (format.equals("student")) {
             rand = new Random(numRows);
             numFields = 3;
         } else if (format.equals("voter")) {
@@ -153,8 +153,8 @@
 
         RCFileOutputFormat.setColumnNumber(conf, numFields);
         RCFile.Writer writer = new RCFile.Writer(fs, conf, getFile(output),
-            null, new DefaultCodec());
-        
+                null, new DefaultCodec());
+
         PrintWriter pw = new PrintWriter(new FileWriter(plainOutput));
 
         for (int j = 0; j < numRows; j++) {
@@ -164,30 +164,30 @@
 
             if (format.equals("student")) {
                 byte[][] f = {
-                    randomName().getBytes("UTF-8"),
-                    Integer.valueOf(randomAge()).toString().getBytes("UTF-8"),
-                    Double.valueOf(randomGpa()).toString().getBytes("UTF-8")
+                        randomName().getBytes("UTF-8"),
+                        Integer.valueOf(randomAge()).toString().getBytes("UTF-8"),
+                        Double.valueOf(randomGpa()).toString().getBytes("UTF-8")
                 };
                 fields = f;
             } else if (format.equals("voter")) {
                 byte[][] f = {
-                    randomName().getBytes("UTF-8"),
-                    Integer.valueOf(randomAge()).toString().getBytes("UTF-8"),
-                    randomRegistration().getBytes("UTF-8"),
-                    Double.valueOf(randomContribution()).toString().getBytes("UTF-8")
+                        randomName().getBytes("UTF-8"),
+                        Integer.valueOf(randomAge()).toString().getBytes("UTF-8"),
+                        randomRegistration().getBytes("UTF-8"),
+                        Double.valueOf(randomContribution()).toString().getBytes("UTF-8")
                 };
                 fields = f;
             } else if (format.equals("alltypes")) {
                 byte[][] f = {
-                    Integer.valueOf(rand.nextInt(Byte.MAX_VALUE)).toString().getBytes("UTF-8"),
-                    Integer.valueOf(rand.nextInt(Short.MAX_VALUE)).toString().getBytes("UTF-8"),
-                    Integer.valueOf(rand.nextInt()).toString().getBytes("UTF-8"),
-                    Long.valueOf(rand.nextLong()).toString().getBytes("UTF-8"),
-                    Float.valueOf(rand.nextFloat() * 1000).toString().getBytes("UTF-8"),
-                    Double.valueOf(rand.nextDouble() * 1000000).toString().getBytes("UTF-8"),
-                    randomName().getBytes("UTF-8"),
-                    randomMap(),
-                    randomArray()
+                        Integer.valueOf(rand.nextInt(Byte.MAX_VALUE)).toString().getBytes("UTF-8"),
+                        Integer.valueOf(rand.nextInt(Short.MAX_VALUE)).toString().getBytes("UTF-8"),
+                        Integer.valueOf(rand.nextInt()).toString().getBytes("UTF-8"),
+                        Long.valueOf(rand.nextLong()).toString().getBytes("UTF-8"),
+                        Float.valueOf(rand.nextFloat() * 1000).toString().getBytes("UTF-8"),
+                        Double.valueOf(rand.nextDouble() * 1000000).toString().getBytes("UTF-8"),
+                        randomName().getBytes("UTF-8"),
+                        randomMap(),
+                        randomArray()
                 };
                 fields = f;
             }
@@ -195,10 +195,10 @@
 
             for (int i = 0; i < fields.length; i++) {
                 BytesRefWritable field = new BytesRefWritable(fields[i], 0,
-                    fields[i].length);
+                        fields[i].length);
                 row.set(i, field);
                 pw.print(new String(fields[i]));
-                if (i!=fields.length-1)
+                if (i != fields.length - 1)
                     pw.print("\t");
                 else
                     pw.println();
@@ -209,6 +209,6 @@
 
         writer.close();
         pw.close();
-  }
+    }
 }
 
diff --git a/src/test/e2e/hcatalog/udfs/java/build.xml b/src/test/e2e/hcatalog/udfs/java/build.xml
index 497ba0a..6486882 100644
--- a/src/test/e2e/hcatalog/udfs/java/build.xml
+++ b/src/test/e2e/hcatalog/udfs/java/build.xml
@@ -15,35 +15,35 @@
 
 <project name="HCatalog-test-utils" default="udf-jar">
 
-    <property name="udf.jarfile" value="testudf.jar" />
-    <property name="udfs.build.dir" value="${basedir}/build" />
-    <property name="udfs.src.dir" value="${basedir}/org/" />
+    <property name="udf.jarfile" value="testudf.jar"/>
+    <property name="udfs.build.dir" value="${basedir}/build"/>
+    <property name="udfs.src.dir" value="${basedir}/org/"/>
 
     <path id="udf-classpath">
-       <fileset file="../../../../../../build/hcatalog/*.jar" />
-       <fileset file="../../../../../../build/ivy/lib/default/*.jar" />
+        <fileset file="../../../../../../build/hcatalog/*.jar"/>
+        <fileset file="../../../../../../build/ivy/lib/default/*.jar"/>
     </path>
 
     <target name="init">
-        <mkdir dir="${udfs.build.dir}" />
+        <mkdir dir="${udfs.build.dir}"/>
     </target>
 
     <target name="clean">
-        <delete dir="${udfs.build.dir}" />
-        <delete file="${udf.jarfile}" />
+        <delete dir="${udfs.build.dir}"/>
+        <delete file="${udf.jarfile}"/>
     </target>
 
     <target name="udf-compile" depends="init">
         <echo>*** Compiling UDFs ***</echo>
         <javac srcdir="${udfs.src.dir}" destdir="${udfs.build.dir}" debug="on">
-            <classpath refid="udf-classpath" />
+            <classpath refid="udf-classpath"/>
         </javac>
     </target>
 
     <target name="udf-jar" depends="udf-compile">
         <echo>*** Creating UDF jar ***</echo>
         <jar duplicate="preserve" jarfile="${udf.jarfile}">
-	    <fileset dir="build"/>
+            <fileset dir="build"/>
         </jar>
     </target>
 </project>
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataReaderMaster.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataReaderMaster.java
index 0d0c625..7bbf3ff 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataReaderMaster.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataReaderMaster.java
@@ -37,34 +37,34 @@
 
 public class DataReaderMaster {
 
-	public static void main(String[] args) throws FileNotFoundException, IOException {
+    public static void main(String[] args) throws FileNotFoundException, IOException {
 
-		// This config contains all the configuration that master node wants to provide
-		// to the HCatalog.
-		Properties externalConfigs = new Properties();
-		externalConfigs.load(new FileReader(args[0]));
-		Map<String,String> config = new HashMap<String, String>();
-		
-		for (Entry<Object, Object> kv : externalConfigs.entrySet()){
-			config.put((String)kv.getKey(), (String)kv.getValue());
-		}
-		
-		// This piece of code runs in master node and gets necessary context.
-		ReaderContext context = runsInMaster(config);
+        // This config contains all the configuration that master node wants to provide
+        // to the HCatalog.
+        Properties externalConfigs = new Properties();
+        externalConfigs.load(new FileReader(args[0]));
+        Map<String, String> config = new HashMap<String, String>();
 
-		ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(new File(args[1])));
-		oos.writeObject(context);
-		oos.flush();
-		oos.close();
-		// Master node will serialize readercontext and will make it available  at slaves.
-	}
+        for (Entry<Object, Object> kv : externalConfigs.entrySet()) {
+            config.put((String) kv.getKey(), (String) kv.getValue());
+        }
 
-	private static ReaderContext runsInMaster(Map<String,String> config) throws HCatException {
+        // This piece of code runs in master node and gets necessary context.
+        ReaderContext context = runsInMaster(config);
 
-		ReadEntity.Builder builder = new ReadEntity.Builder();
-		ReadEntity entity = builder.withTable(config.get("table")).build();
-		HCatReader reader = DataTransferFactory.getHCatReader(entity, config);
-		ReaderContext cntxt = reader.prepareRead();
-		return cntxt;
-	}
+        ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(new File(args[1])));
+        oos.writeObject(context);
+        oos.flush();
+        oos.close();
+        // Master node will serialize readercontext and will make it available  at slaves.
+    }
+
+    private static ReaderContext runsInMaster(Map<String, String> config) throws HCatException {
+
+        ReadEntity.Builder builder = new ReadEntity.Builder();
+        ReadEntity entity = builder.withTable(config.get("table")).build();
+        HCatReader reader = DataTransferFactory.getHCatReader(entity, config);
+        ReaderContext cntxt = reader.prepareRead();
+        return cntxt;
+    }
 }
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataReaderSlave.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataReaderSlave.java
index b3cf98c..84ee681 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataReaderSlave.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataReaderSlave.java
@@ -34,28 +34,28 @@
 
 public class DataReaderSlave {
 
-	public static void main(String[] args) throws IOException, ClassNotFoundException {
-		
-		ObjectInputStream ois = new ObjectInputStream(new FileInputStream(new File(args[0])));
-		ReaderContext cntxt = (ReaderContext) ois.readObject();
-		ois.close();
-		
-		String[] inpSlitsToRead = args[1].split(",");
-		List<InputSplit> splits = cntxt.getSplits();
-		
-		for (int i = 0; i < inpSlitsToRead.length; i++){
-			InputSplit split = splits.get(Integer.parseInt(inpSlitsToRead[i]));
-			HCatReader reader = DataTransferFactory.getHCatReader(split, cntxt.getConf());
-			Iterator<HCatRecord> itr = reader.read();
-			File f = new File(args[2]+"-"+i);
-			f.delete();
-			BufferedWriter outFile = new BufferedWriter(new FileWriter(f)); 
-			while(itr.hasNext()){
-				String rec = itr.next().toString().replaceFirst("\\s+$", "");
-				System.err.println(rec);
-				outFile.write(rec+"\n");
-			}
-			outFile.close();
-		}
-	}
+    public static void main(String[] args) throws IOException, ClassNotFoundException {
+
+        ObjectInputStream ois = new ObjectInputStream(new FileInputStream(new File(args[0])));
+        ReaderContext cntxt = (ReaderContext) ois.readObject();
+        ois.close();
+
+        String[] inpSlitsToRead = args[1].split(",");
+        List<InputSplit> splits = cntxt.getSplits();
+
+        for (int i = 0; i < inpSlitsToRead.length; i++) {
+            InputSplit split = splits.get(Integer.parseInt(inpSlitsToRead[i]));
+            HCatReader reader = DataTransferFactory.getHCatReader(split, cntxt.getConf());
+            Iterator<HCatRecord> itr = reader.read();
+            File f = new File(args[2] + "-" + i);
+            f.delete();
+            BufferedWriter outFile = new BufferedWriter(new FileWriter(f));
+            while (itr.hasNext()) {
+                String rec = itr.next().toString().replaceFirst("\\s+$", "");
+                System.err.println(rec);
+                outFile.write(rec + "\n");
+            }
+            outFile.close();
+        }
+    }
 }
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataWriterMaster.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataWriterMaster.java
index 909b23f..e3cea56 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataWriterMaster.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataWriterMaster.java
@@ -39,57 +39,57 @@
 
 public class DataWriterMaster {
 
-	public static void main(String[] args) throws FileNotFoundException, IOException, ClassNotFoundException {
+    public static void main(String[] args) throws FileNotFoundException, IOException, ClassNotFoundException {
 
-		// This config contains all the configuration that master node wants to provide
-		// to the HCatalog.
-		Properties externalConfigs = new Properties();
-		externalConfigs.load(new FileReader(args[0]));
-		Map<String,String> config = new HashMap<String, String>();
+        // This config contains all the configuration that master node wants to provide
+        // to the HCatalog.
+        Properties externalConfigs = new Properties();
+        externalConfigs.load(new FileReader(args[0]));
+        Map<String, String> config = new HashMap<String, String>();
 
-		for (Entry<Object, Object> kv : externalConfigs.entrySet()){
-			System.err.println("k: " + kv.getKey() + "\t v: " + kv.getValue());
-			config.put((String)kv.getKey(), (String)kv.getValue());
-		}
+        for (Entry<Object, Object> kv : externalConfigs.entrySet()) {
+            System.err.println("k: " + kv.getKey() + "\t v: " + kv.getValue());
+            config.put((String) kv.getKey(), (String) kv.getValue());
+        }
 
-		if(args.length == 3 && "commit".equalsIgnoreCase(args[2])){
-			// Then, master commits if everything goes well.
-			ObjectInputStream ois = new ObjectInputStream(new FileInputStream(new File(args[1])));
-			WriterContext cntxt = (WriterContext)ois.readObject();
-			commit(config,true, cntxt);		
-			System.exit(0);
-		}
-		// This piece of code runs in master node and gets necessary context.
-		WriterContext cntxt = runsInMaster(config);
-		
-		
-		// Master node will serialize writercontext and will make it available at slaves.
-		File f = new File(args[1]);
-		f.delete();
-		ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(f));
-		oos.writeObject(cntxt);
-		oos.flush();
-		oos.close();
-	}
+        if (args.length == 3 && "commit".equalsIgnoreCase(args[2])) {
+            // Then, master commits if everything goes well.
+            ObjectInputStream ois = new ObjectInputStream(new FileInputStream(new File(args[1])));
+            WriterContext cntxt = (WriterContext) ois.readObject();
+            commit(config, true, cntxt);
+            System.exit(0);
+        }
+        // This piece of code runs in master node and gets necessary context.
+        WriterContext cntxt = runsInMaster(config);
 
-	private static WriterContext runsInMaster(Map<String, String> config) throws HCatException {
 
-		WriteEntity.Builder builder = new WriteEntity.Builder();
-		WriteEntity entity = builder.withTable(config.get("table")).build();
-		HCatWriter writer = DataTransferFactory.getHCatWriter(entity, config);
-		WriterContext info = writer.prepareWrite();
-		return info;
-	}
+        // Master node will serialize writercontext and will make it available at slaves.
+        File f = new File(args[1]);
+        f.delete();
+        ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(f));
+        oos.writeObject(cntxt);
+        oos.flush();
+        oos.close();
+    }
 
-	private static void commit(Map<String, String> config, boolean status, WriterContext cntxt) throws HCatException {
+    private static WriterContext runsInMaster(Map<String, String> config) throws HCatException {
 
-		WriteEntity.Builder builder = new WriteEntity.Builder();
-		WriteEntity entity = builder.withTable(config.get("table")).build();
-		HCatWriter writer = DataTransferFactory.getHCatWriter(entity, config);
-		if(status){
-			writer.commit(cntxt);			
-		} else {
-			writer.abort(cntxt);
-		}
-	} 
+        WriteEntity.Builder builder = new WriteEntity.Builder();
+        WriteEntity entity = builder.withTable(config.get("table")).build();
+        HCatWriter writer = DataTransferFactory.getHCatWriter(entity, config);
+        WriterContext info = writer.prepareWrite();
+        return info;
+    }
+
+    private static void commit(Map<String, String> config, boolean status, WriterContext cntxt) throws HCatException {
+
+        WriteEntity.Builder builder = new WriteEntity.Builder();
+        WriteEntity entity = builder.withTable(config.get("table")).build();
+        HCatWriter writer = DataTransferFactory.getHCatWriter(entity, config);
+        if (status) {
+            writer.commit(cntxt);
+        } else {
+            writer.abort(cntxt);
+        }
+    }
 }
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataWriterSlave.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataWriterSlave.java
index fe72b84..21d0a24 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataWriterSlave.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/DataWriterSlave.java
@@ -36,51 +36,51 @@
 
 public class DataWriterSlave {
 
-	public static void main(String[] args) throws FileNotFoundException, IOException, ClassNotFoundException {
-		
-		ObjectInputStream ois = new ObjectInputStream(new FileInputStream(args[0]));
-		WriterContext cntxt = (WriterContext) ois.readObject();
-		ois.close();
-		
-		HCatWriter writer = DataTransferFactory.getHCatWriter(cntxt);
-		writer.write(new HCatRecordItr(args[1]));
-		
-	}
-	
-	private static class HCatRecordItr implements Iterator<HCatRecord> {
+    public static void main(String[] args) throws FileNotFoundException, IOException, ClassNotFoundException {
 
-		BufferedReader reader;
-		String curLine;
-		
-		public HCatRecordItr(String fileName) throws FileNotFoundException {
-			reader = new BufferedReader(new FileReader(new File(fileName)));
-		}
-		
-		@Override
-		public boolean hasNext() {
-			try {
-				curLine = reader.readLine();
-			} catch (IOException e) {
-				e.printStackTrace();
-			}
-			return null == curLine ? false : true;
-		}
+        ObjectInputStream ois = new ObjectInputStream(new FileInputStream(args[0]));
+        WriterContext cntxt = (WriterContext) ois.readObject();
+        ois.close();
 
-		@Override
-		public HCatRecord next() {
+        HCatWriter writer = DataTransferFactory.getHCatWriter(cntxt);
+        writer.write(new HCatRecordItr(args[1]));
 
-			String[] fields = curLine.split("\t");
-			List<Object> data = new ArrayList<Object>(3);
-			data.add(fields[0]);
-			data.add(Integer.parseInt(fields[1]));
-			data.add(Double.parseDouble(fields[2]));
-			return new DefaultHCatRecord(data);
-		}
+    }
 
-		@Override
-		public void remove() {
-			// TODO Auto-generated method stub
-			
-		}
-	}
+    private static class HCatRecordItr implements Iterator<HCatRecord> {
+
+        BufferedReader reader;
+        String curLine;
+
+        public HCatRecordItr(String fileName) throws FileNotFoundException {
+            reader = new BufferedReader(new FileReader(new File(fileName)));
+        }
+
+        @Override
+        public boolean hasNext() {
+            try {
+                curLine = reader.readLine();
+            } catch (IOException e) {
+                e.printStackTrace();
+            }
+            return null == curLine ? false : true;
+        }
+
+        @Override
+        public HCatRecord next() {
+
+            String[] fields = curLine.split("\t");
+            List<Object> data = new ArrayList<Object>(3);
+            data.add(fields[0]);
+            data.add(Integer.parseInt(fields[1]));
+            data.add(Double.parseDouble(fields[2]));
+            return new DefaultHCatRecord(data);
+        }
+
+        @Override
+        public void remove() {
+            // TODO Auto-generated method stub
+
+        }
+    }
 }
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/GroupByAge.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/GroupByAge.java
index 0675099..e47f209 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/GroupByAge.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/GroupByAge.java
@@ -45,7 +45,7 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce
  * program to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat
  * jar> The <tab|ctrla> argument controls the output delimiter The hcat jar
  * location should be specified as file://<full path to jar>
@@ -53,40 +53,40 @@
 public class GroupByAge extends Configured implements Tool {
 
     public static class Map extends
-            Mapper<WritableComparable, HCatRecord, IntWritable, IntWritable> {
+        Mapper<WritableComparable, HCatRecord, IntWritable, IntWritable> {
 
         int age;
-        
+
         @Override
         protected void map(
-                WritableComparable key,
-                HCatRecord value,
-                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, IntWritable, IntWritable>.Context context)
-                throws IOException, InterruptedException {
+            WritableComparable key,
+            HCatRecord value,
+            org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, IntWritable, IntWritable>.Context context)
+            throws IOException, InterruptedException {
             age = (Integer) value.get(1);
             context.write(new IntWritable(age), new IntWritable(1));
         }
     }
-    
+
     public static class Reduce extends Reducer<IntWritable, IntWritable,
-    WritableComparable, HCatRecord> {
+        WritableComparable, HCatRecord> {
 
 
-      @Override
-      protected void reduce(IntWritable key, java.lang.Iterable<IntWritable>
-        values, org.apache.hadoop.mapreduce.Reducer<IntWritable,IntWritable,WritableComparable,HCatRecord>.Context context)
-        throws IOException ,InterruptedException {
-          int sum = 0;
-          Iterator<IntWritable> iter = values.iterator();
-          while (iter.hasNext()) {
-              sum++;
-              iter.next();
-          }
-          HCatRecord record = new DefaultHCatRecord(2);
-          record.set(0, key.get());
-          record.set(1, sum);
-          
-          context.write(null, record);
+        @Override
+        protected void reduce(IntWritable key, java.lang.Iterable<IntWritable>
+            values, org.apache.hadoop.mapreduce.Reducer<IntWritable, IntWritable, WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            int sum = 0;
+            Iterator<IntWritable> iter = values.iterator();
+            while (iter.hasNext()) {
+                sum++;
+                iter.next();
+            }
+            HCatRecord record = new DefaultHCatRecord(2);
+            record.set(0, key.get());
+            record.set(1, sum);
+
+            context.write(null, record);
         }
     }
 
@@ -100,12 +100,12 @@
         String dbName = null;
 
         String principalID = System
-                .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
         if (principalID != null)
             conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
         Job job = new Job(conf, "GroupByAge");
         HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
-                inputTableName, null));
+            inputTableName, null));
         // initialize HCatOutputFormat
 
         job.setInputFormatClass(HCatInputFormat.class);
@@ -117,10 +117,10 @@
         job.setOutputKeyClass(WritableComparable.class);
         job.setOutputValueClass(DefaultHCatRecord.class);
         HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
-                outputTableName, null));
+            outputTableName, null));
         HCatSchema s = HCatOutputFormat.getTableSchema(job);
         System.err.println("INFO: output schema explicitly set for writing:"
-                + s);
+            + s);
         HCatOutputFormat.setSchema(job, s);
         job.setOutputFormatClass(HCatOutputFormat.class);
         return (job.waitForCompletion(true) ? 0 : 1);
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HBaseReadWrite.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HBaseReadWrite.java
index d72bf6f..25e9d29 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HBaseReadWrite.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HBaseReadWrite.java
@@ -48,7 +48,7 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce
  * program to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat
  * jar> The <tab|ctrla> argument controls the output delimiter The hcat jar
  * location should be specified as file://<full path to jar>
@@ -56,7 +56,7 @@
 public class HBaseReadWrite extends Configured implements Tool {
 
     public static class HBaseWriteMap extends
-            Mapper<LongWritable, Text, Text, Text> {
+        Mapper<LongWritable, Text, Text, Text> {
 
         String name;
         String age;
@@ -64,21 +64,21 @@
 
         @Override
         protected void map(
-                LongWritable key,
-                Text value,
-                org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, Text, Text>.Context context)
-                throws IOException, InterruptedException {
+            LongWritable key,
+            Text value,
+            org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, Text, Text>.Context context)
+            throws IOException, InterruptedException {
             String line = value.toString();
             String[] tokens = line.split("\t");
             name = tokens[0];
-            
+
             context.write(new Text(name), value);
         }
     }
-    
+
 
     public static class HBaseWriteReduce extends
-            Reducer<Text, Text, WritableComparable, HCatRecord> {
+        Reducer<Text, Text, WritableComparable, HCatRecord> {
 
         String name;
         String age;
@@ -86,7 +86,7 @@
 
         @Override
         protected void reduce(Text key, Iterable<Text> values, Context context)
-                throws IOException, InterruptedException {
+            throws IOException, InterruptedException {
             name = key.toString();
             int count = 0;
             double sum = 0;
@@ -96,21 +96,21 @@
                 name = tokens[0];
                 age = tokens[1];
                 gpa = tokens[2];
-                
+
                 count++;
                 sum += Double.parseDouble(gpa.toString());
             }
-            
+
             HCatRecord record = new DefaultHCatRecord(2);
             record.set(0, name);
             record.set(1, Double.toString(sum));
-            
+
             context.write(null, record);
         }
     }
 
     public static class HBaseReadMap extends
-            Mapper<WritableComparable, HCatRecord, Text, Text> {
+        Mapper<WritableComparable, HCatRecord, Text, Text> {
 
         String name;
         String age;
@@ -118,16 +118,16 @@
 
         @Override
         protected void map(
-                WritableComparable key,
-                HCatRecord value,
-                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, Text, Text>.Context context)
-                throws IOException, InterruptedException {
+            WritableComparable key,
+            HCatRecord value,
+            org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, Text, Text>.Context context)
+            throws IOException, InterruptedException {
             name = (String) value.get(0);
             gpa = (String) value.get(1);
             context.write(new Text(name), new Text(gpa));
         }
     }
-    
+
 
     public int run(String[] args) throws Exception {
         Configuration conf = getConf();
@@ -140,13 +140,13 @@
         String dbName = null;
 
         String principalID = System
-                .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
         if (principalID != null)
             conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
         conf.set("hcat.hbase.output.bulkMode", "false");
         Job job = new Job(conf, "HBaseWrite");
         FileInputFormat.setInputPaths(job, inputDir);
-        
+
         job.setInputFormatClass(TextInputFormat.class);
         job.setOutputFormatClass(HCatOutputFormat.class);
         job.setJarByClass(HBaseReadWrite.class);
@@ -157,16 +157,16 @@
         job.setOutputKeyClass(WritableComparable.class);
         job.setOutputValueClass(DefaultHCatRecord.class);
         HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
-                tableName, null));
-        
+            tableName, null));
+
         boolean succ = job.waitForCompletion(true);
-        
+
         if (!succ) return 1;
-        
+
         job = new Job(conf, "HBaseRead");
         HCatInputFormat.setInput(job, InputJobInfo.create(dbName, tableName,
-                null));
-        
+            null));
+
         job.setInputFormatClass(HCatInputFormat.class);
         job.setOutputFormatClass(TextOutputFormat.class);
         job.setJarByClass(HBaseReadWrite.class);
@@ -175,11 +175,11 @@
         job.setOutputValueClass(Text.class);
         job.setNumReduceTasks(0);
         TextOutputFormat.setOutputPath(job, new Path(outputDir));
-        
+
         succ = job.waitForCompletion(true);
-        
+
         if (!succ) return 2;
-        
+
         return 0;
     }
 
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTestDriver.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTestDriver.java
index 5c80644..c9b1653 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTestDriver.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTestDriver.java
@@ -25,36 +25,35 @@
  * human-readable description.
  */
 public class HCatTestDriver {
-  
-  public static void main(String argv[]){
-    int exitCode = -1;
-    ProgramDriver pgd = new ProgramDriver();
-    try {
-      pgd.addClass("typedatacheck", TypeDataCheck.class, 
-                   "A map/reduce program that checks the type of each field and" +
-                   " outputs the entire table (to test hcat).");
-      pgd.addClass("sumnumbers", SumNumbers.class, 
-      "A map/reduce program that performs a group by on the first column and a " +
-      "SUM operation on the other columns of the \"numbers\" table.");
-      pgd.addClass("storenumbers", StoreNumbers.class, "A map/reduce program that " +
-      		"reads from the \"numbers\" table and adds 10 to each fields and writes " +
-      				"to the \"numbers_partitioned\" table into the datestamp=20100101 " +
-      				"partition OR the \"numbers_empty_initially\" table based on a " +
-      				"cmdline arg");
-      pgd.addClass("storecomplex", StoreComplex.class, "A map/reduce program that " +
-              "reads from the \"complex\" table and stores as-is into the " +
-              "\"complex_empty_initially\" table.");
-      pgd.addClass("storedemo", StoreDemo.class, "demo prog.");
-      pgd.driver(argv);
-      
-      // Success
-      exitCode = 0;
+
+    public static void main(String argv[]) {
+        int exitCode = -1;
+        ProgramDriver pgd = new ProgramDriver();
+        try {
+            pgd.addClass("typedatacheck", TypeDataCheck.class,
+                "A map/reduce program that checks the type of each field and" +
+                    " outputs the entire table (to test hcat).");
+            pgd.addClass("sumnumbers", SumNumbers.class,
+                "A map/reduce program that performs a group by on the first column and a " +
+                    "SUM operation on the other columns of the \"numbers\" table.");
+            pgd.addClass("storenumbers", StoreNumbers.class, "A map/reduce program that " +
+                "reads from the \"numbers\" table and adds 10 to each fields and writes " +
+                "to the \"numbers_partitioned\" table into the datestamp=20100101 " +
+                "partition OR the \"numbers_empty_initially\" table based on a " +
+                "cmdline arg");
+            pgd.addClass("storecomplex", StoreComplex.class, "A map/reduce program that " +
+                "reads from the \"complex\" table and stores as-is into the " +
+                "\"complex_empty_initially\" table.");
+            pgd.addClass("storedemo", StoreDemo.class, "demo prog.");
+            pgd.driver(argv);
+
+            // Success
+            exitCode = 0;
+        } catch (Throwable e) {
+            e.printStackTrace();
+        }
+
+        System.exit(exitCode);
     }
-    catch(Throwable e){
-      e.printStackTrace();
-    }
-    
-    System.exit(exitCode);
-  }
 }
 	
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTypeCheck.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTypeCheck.java
index 1ce838d..3f7dfbe 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTypeCheck.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTypeCheck.java
@@ -33,25 +33,25 @@
 /**
  * This UDF can be used to check that a tuple presented by HCatLoader has the
  * right types for the fields
- * 
+ *
  * Usage is :
- * 
+ *
  * register testudf.jar;
  * a = load 'numbers' using HCatLoader(...);
  * b = foreach a generate HCatTypeCheck('intnum1000:int,id:int,intnum5:int,intnum100:int,intnum:int,longnum:long,floatnum:float,doublenum:double', *);
  * store b into 'output';
- * 
+ *
  * The schema string (the first argument to the UDF) is of the form one would provide in a 
  * pig load statement.
- * 
+ *
  * The output should only contain the value '1' in all rows. (This UDF returns
  * the integer value 1 if all fields have the right type, else throws IOException)
- *  
+ *
  */
 public class HCatTypeCheck extends EvalFunc<Integer> {
 
     static HashMap<Byte, Class<?>> typeMap = new HashMap<Byte, Class<?>>();
-    
+
     @Override
     public Integer exec(Tuple input) throws IOException {
         String schemaStr = (String) input.get(0);
@@ -61,13 +61,13 @@
         } catch (Exception e) {
             throw new IOException(e);
         }
-        for(int i = 0; i < s.size(); i++) {
-            check(s.getField(i).type, input.get(i+1)); // input.get(i+1) since input.get(0) is the schema;
+        for (int i = 0; i < s.size(); i++) {
+            check(s.getField(i).type, input.get(i + 1)); // input.get(i+1) since input.get(0) is the schema;
         }
         return 1;
     }
-    
-    static {    
+
+    static {
         typeMap.put(DataType.INTEGER, Integer.class);
         typeMap.put(DataType.LONG, Long.class);
         typeMap.put(DataType.FLOAT, Float.class);
@@ -77,34 +77,33 @@
         typeMap.put(DataType.MAP, Map.class);
         typeMap.put(DataType.BAG, DataBag.class);
     }
-    
-    
-    
+
+
     private void die(String expectedType, Object o) throws IOException {
-        throw new IOException("Expected " + expectedType + ", got " +  
-              o.getClass().getName());
+        throw new IOException("Expected " + expectedType + ", got " +
+            o.getClass().getName());
     }
-    
-    
+
+
     private String check(Byte type, Object o) throws IOException {
-        if(o == null) {
+        if (o == null) {
             return "";
         }
-        if(check(typeMap.get(type), o)) {
-            if(type.equals(DataType.MAP)) {
+        if (check(typeMap.get(type), o)) {
+            if (type.equals(DataType.MAP)) {
                 Map<String, String> m = (Map<String, String>) o;
                 check(m);
-            } else if(type.equals(DataType.BAG)) {
+            } else if (type.equals(DataType.BAG)) {
                 DataBag bg = (DataBag) o;
                 for (Tuple tuple : bg) {
                     Map<String, String> m = (Map<String, String>) tuple.get(0);
                     check(m);
                 }
-            } else if(type.equals(DataType.TUPLE)) {
+            } else if (type.equals(DataType.TUPLE)) {
                 Tuple t = (Tuple) o;
-                if(!check(Integer.class, t.get(0)) ||
-                        !check(String.class, t.get(1)) ||
-                                !check(Double.class, t.get(2))) {
+                if (!check(Integer.class, t.get(0)) ||
+                    !check(String.class, t.get(1)) ||
+                    !check(Double.class, t.get(2))) {
                     die("t:tuple(num:int,str:string,dbl:double)", t);
                 }
             }
@@ -113,26 +112,26 @@
         }
         return o.toString();
     }
-    
+
     /**
-    * @param m
-    * @throws IOException 
-    */
+     * @param m
+     * @throws IOException
+     */
     private void check(Map<String, String> m) throws IOException {
-      for(Entry<String, String> e: m.entrySet()) {
-          // just access key and value to ensure they are correct
-          if(!check(String.class, e.getKey())) {
-              die("String", e.getKey());
-          }
-          if(!check(String.class, e.getValue())) {
-              die("String", e.getValue());
-          }
-      }
-      
+        for (Entry<String, String> e : m.entrySet()) {
+            // just access key and value to ensure they are correct
+            if (!check(String.class, e.getKey())) {
+                die("String", e.getKey());
+            }
+            if (!check(String.class, e.getValue())) {
+                die("String", e.getValue());
+            }
+        }
+
     }
-    
+
     private boolean check(Class<?> expected, Object actual) {
-        if(actual == null) {
+        if (actual == null) {
             return true;
         }
         return expected.isAssignableFrom(actual.getClass());
@@ -140,11 +139,11 @@
 
     Schema getSchemaFromString(String schemaString) throws Exception {
         /** ByteArrayInputStream stream = new ByteArrayInputStream(schemaString.getBytes()) ;
-        QueryParser queryParser = new QueryParser(stream) ;
-        Schema schema = queryParser.TupleSchema() ;
-        Schema.setSchemaDefaultType(schema, org.apache.pig.data.DataType.BYTEARRAY);
-        return schema;
-        */
+         QueryParser queryParser = new QueryParser(stream) ;
+         Schema schema = queryParser.TupleSchema() ;
+         Schema.setSchemaDefaultType(schema, org.apache.pig.data.DataType.BYTEARRAY);
+         return schema;
+         */
         return Utils.getSchemaFromString(schemaString);
     }
 
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTypeCheckHive.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTypeCheckHive.java
index 930756e..43a6a53 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTypeCheckHive.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/HCatTypeCheckHive.java
@@ -40,101 +40,102 @@
 
 /**
  * A hive udf to check types of the fields read from hcat. A sample hive query which can use this is:
- * 
+ *
  * create temporary function typecheck as 'org.apache.hcatalog.utils.HCatTypeCheckHive';
  * select typecheck('map<string,string>+struct<num:int,str:string,dbl:double>+array<map<string,string>>+int', 
  * mymap, mytuple, bagofmap, rownum) from complex;
- * 
- * 
+ *
+ *
  * The first argument to the UDF is a string representing the schema of the columns in the table. 
  * The columns in the tables are the remaining args to it.
  * The schema specification consists of the types as given by "describe <table>"
  * with each column's type separated from the next column's type by a '+'
- * 
+ *
  * The UDF will throw an exception (and cause the query to fail) if it does not
  * encounter the correct types.
- * 
+ *
  * The output is a string representation of the data , type and hive category.
  * It is not advisable to use this against large dataset since the output would also
  * be large. 
- * 
+ *
  */
 public final class HCatTypeCheckHive extends GenericUDF {
 
-ObjectInspector[] argOIs;
-@Override
-public Object evaluate(DeferredObject[] args) throws HiveException {
-    List<Object> row = new ArrayList<Object>();
-    String typesStr = (String) getJavaObject(args[0].get(), argOIs[0], new ArrayList<Category>());
-    String[] types = typesStr.split("\\+");
-    for(int i = 0; i < types.length; i++) {
-        types[i] = types[i].toLowerCase();
-    }
-    for(int i = 1; i < args.length; i++) {
-        ObjectInspector oi = argOIs[i];
-        List<ObjectInspector.Category> categories = new ArrayList<ObjectInspector.Category>();
-        Object o = getJavaObject(args[i].get(),oi, categories);
-        try {
-            if(o != null) {
-                Util.check(types[i-1], o);
+    ObjectInspector[] argOIs;
+
+    @Override
+    public Object evaluate(DeferredObject[] args) throws HiveException {
+        List<Object> row = new ArrayList<Object>();
+        String typesStr = (String) getJavaObject(args[0].get(), argOIs[0], new ArrayList<Category>());
+        String[] types = typesStr.split("\\+");
+        for (int i = 0; i < types.length; i++) {
+            types[i] = types[i].toLowerCase();
+        }
+        for (int i = 1; i < args.length; i++) {
+            ObjectInspector oi = argOIs[i];
+            List<ObjectInspector.Category> categories = new ArrayList<ObjectInspector.Category>();
+            Object o = getJavaObject(args[i].get(), oi, categories);
+            try {
+                if (o != null) {
+                    Util.check(types[i - 1], o);
+                }
+            } catch (IOException e) {
+                throw new HiveException(e);
             }
-        } catch (IOException e) {
-            throw new HiveException(e);
+            row.add(o == null ? "null" : o);
+            row.add(":" + (o == null ? "null" : o.getClass()) + ":" + categories);
         }
-        row.add(o == null ? "null" : o);
-        row.add(":" + (o == null ? "null" : o.getClass()) + ":" + categories);
+        return row.toString();
     }
-    return row.toString();
-}
 
-private Object getJavaObject(Object o, ObjectInspector oi, List<Category> categories) {
-    if(categories != null) {
-        categories.add(oi.getCategory());
+    private Object getJavaObject(Object o, ObjectInspector oi, List<Category> categories) {
+        if (categories != null) {
+            categories.add(oi.getCategory());
+        }
+        if (oi.getCategory() == ObjectInspector.Category.LIST) {
+            List<?> l = ((ListObjectInspector) oi).getList(o);
+            List<Object> result = new ArrayList<Object>();
+            ObjectInspector elemOI = ((ListObjectInspector) oi).getListElementObjectInspector();
+            for (Object lo : l) {
+                result.add(getJavaObject(lo, elemOI, categories));
+            }
+            return result;
+        } else if (oi.getCategory() == ObjectInspector.Category.MAP) {
+            Map<?, ?> m = ((MapObjectInspector) oi).getMap(o);
+            Map<String, String> result = new HashMap<String, String>();
+            ObjectInspector koi = ((MapObjectInspector) oi).getMapKeyObjectInspector();
+            ObjectInspector voi = ((MapObjectInspector) oi).getMapValueObjectInspector();
+            for (Entry<?, ?> e : m.entrySet()) {
+                result.put((String) getJavaObject(e.getKey(), koi, null),
+                    (String) getJavaObject(e.getValue(), voi, null));
+            }
+            return result;
+
+        } else if (oi.getCategory() == ObjectInspector.Category.STRUCT) {
+            List<Object> s = ((StructObjectInspector) oi).getStructFieldsDataAsList(o);
+            List<? extends StructField> sf = ((StructObjectInspector) oi).getAllStructFieldRefs();
+            List<Object> result = new ArrayList<Object>();
+            for (int i = 0; i < s.size(); i++) {
+                result.add(getJavaObject(s.get(i), sf.get(i).getFieldObjectInspector(), categories));
+            }
+            return result;
+        } else if (oi.getCategory() == ObjectInspector.Category.PRIMITIVE) {
+            return ((PrimitiveObjectInspector) oi).getPrimitiveJavaObject(o);
+        }
+        throw new RuntimeException("Unexpected error!");
     }
-    if(oi.getCategory() == ObjectInspector.Category.LIST) {
-        List<?> l = ((ListObjectInspector)oi).getList(o);
-        List<Object> result = new ArrayList<Object>();
-        ObjectInspector elemOI = ((ListObjectInspector)oi).getListElementObjectInspector();
-        for(Object lo : l) {
-            result.add(getJavaObject(lo, elemOI, categories));    
-        }
-        return result;
-    } else if (oi.getCategory() == ObjectInspector.Category.MAP) {
-        Map<?,?> m = ((MapObjectInspector)oi).getMap(o);
-        Map<String, String> result = new HashMap<String, String>();
-        ObjectInspector koi = ((MapObjectInspector)oi).getMapKeyObjectInspector();
-        ObjectInspector voi = ((MapObjectInspector)oi).getMapValueObjectInspector();
-        for(Entry<?,?> e: m.entrySet()) {
-            result.put((String)getJavaObject(e.getKey(), koi, null), 
-                    (String)getJavaObject(e.getValue(), voi, null));
-        }
-        return result;
-        
-    } else if (oi.getCategory() == ObjectInspector.Category.STRUCT) {
-        List<Object> s = ((StructObjectInspector)oi).getStructFieldsDataAsList(o);
-        List<? extends StructField> sf = ((StructObjectInspector)oi).getAllStructFieldRefs();
-        List<Object> result = new ArrayList<Object>();
-        for(int i = 0; i < s.size(); i++) {
-            result.add(getJavaObject(s.get(i), sf.get(i).getFieldObjectInspector(), categories));
-        }
-        return result;
-    } else if(oi.getCategory() == ObjectInspector.Category.PRIMITIVE) {
-        return ((PrimitiveObjectInspector)oi).getPrimitiveJavaObject(o);
+
+    @Override
+    public String getDisplayString(String[] arg0) {
+        return null;
     }
-    throw new RuntimeException("Unexpected error!");
-}
 
-@Override
-public String getDisplayString(String[] arg0) {
-    return null;
-}
-
-@Override
-public ObjectInspector initialize(ObjectInspector[] argOIs)
+    @Override
+    public ObjectInspector initialize(ObjectInspector[] argOIs)
         throws UDFArgumentException {
-    this.argOIs = argOIs;
-    return ObjectInspectorFactory.getReflectionObjectInspector(String.class, 
+        this.argOIs = argOIs;
+        return ObjectInspectorFactory.getReflectionObjectInspector(String.class,
             ObjectInspectorOptions.JAVA);
-}
+    }
 
 }
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadJson.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadJson.java
index a3d7df3..c52bac4 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadJson.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadJson.java
@@ -43,69 +43,69 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce program
  * to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat jar>
-            The <tab|ctrla> argument controls the output delimiter
-            The hcat jar location should be specified as file://<full path to jar>
+ The <tab|ctrla> argument controls the output delimiter
+ The hcat jar location should be specified as file://<full path to jar>
  */
 public class ReadJson extends Configured implements Tool {
-    
-  public static class Map
-       extends Mapper<WritableComparable, HCatRecord, IntWritable, HCatRecord>{
-      
-      String s;
-      Integer i;
-      Double d;
-      
-    @Override
-  protected void map(WritableComparable key, HCatRecord value, 
-          org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,
-          IntWritable,HCatRecord>.Context context) 
-    throws IOException ,InterruptedException {
-        s = value.get(0)==null?null:(String)value.get(0);
-        i = value.get(1)==null?null:(Integer)value.get(1);
-        d = value.get(2)==null?null:(Double)value.get(2);
-        
-        HCatRecord record = new DefaultHCatRecord(3);
-        record.set(0, s);
-        record.set(1, i);
-        record.set(2, d);
-        
-        context.write(null, record);
 
+    public static class Map
+        extends Mapper<WritableComparable, HCatRecord, IntWritable, HCatRecord> {
+
+        String s;
+        Integer i;
+        Double d;
+
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord,
+                               IntWritable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            s = value.get(0) == null ? null : (String) value.get(0);
+            i = value.get(1) == null ? null : (Integer) value.get(1);
+            d = value.get(2) == null ? null : (Double) value.get(2);
+
+            HCatRecord record = new DefaultHCatRecord(3);
+            record.set(0, s);
+            record.set(1, i);
+            record.set(2, d);
+
+            context.write(null, record);
+
+        }
     }
-  }
-  
-   public int run(String[] args) throws Exception {
-    Configuration conf = getConf();
-    args = new GenericOptionsParser(conf, args).getRemainingArgs();
 
-    String serverUri = args[0];
-    String tableName = args[1];
-    String outputDir = args[2];
-    String dbName = null;
-    
-    String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-    if(principalID != null)
-    conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
-    Job job = new Job(conf, "ReadJson");
-    HCatInputFormat.setInput(job, InputJobInfo.create(
-    		dbName, tableName, null));
-    // initialize HCatOutputFormat
-    
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
-    job.setJarByClass(ReadJson.class);
-    job.setMapperClass(Map.class);
-    job.setOutputKeyClass(IntWritable.class);
-    job.setOutputValueClass(HCatRecord.class);
-    job.setNumReduceTasks(0);
-    FileOutputFormat.setOutputPath(job, new Path(outputDir));
-    return (job.waitForCompletion(true) ? 0 : 1);
-  }
-   
-   public static void main(String[] args) throws Exception {
-       int exitCode = ToolRunner.run(new ReadJson(), args);
-       System.exit(exitCode);
-   }
+    public int run(String[] args) throws Exception {
+        Configuration conf = getConf();
+        args = new GenericOptionsParser(conf, args).getRemainingArgs();
+
+        String serverUri = args[0];
+        String tableName = args[1];
+        String outputDir = args[2];
+        String dbName = null;
+
+        String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+        if (principalID != null)
+            conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+        Job job = new Job(conf, "ReadJson");
+        HCatInputFormat.setInput(job, InputJobInfo.create(
+            dbName, tableName, null));
+        // initialize HCatOutputFormat
+
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
+        job.setJarByClass(ReadJson.class);
+        job.setMapperClass(Map.class);
+        job.setOutputKeyClass(IntWritable.class);
+        job.setOutputValueClass(HCatRecord.class);
+        job.setNumReduceTasks(0);
+        FileOutputFormat.setOutputPath(job, new Path(outputDir));
+        return (job.waitForCompletion(true) ? 0 : 1);
+    }
+
+    public static void main(String[] args) throws Exception {
+        int exitCode = ToolRunner.run(new ReadJson(), args);
+        System.exit(exitCode);
+    }
 }
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadRC.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadRC.java
index 5bafc04..116c993 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadRC.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadRC.java
@@ -43,70 +43,70 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce program
  * to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat jar>
-            The <tab|ctrla> argument controls the output delimiter
-            The hcat jar location should be specified as file://<full path to jar>
+ The <tab|ctrla> argument controls the output delimiter
+ The hcat jar location should be specified as file://<full path to jar>
  */
 public class ReadRC extends Configured implements Tool {
-    
-  public static class Map
-       extends Mapper<WritableComparable, HCatRecord, IntWritable, HCatRecord>{
-      
-      String name;
-      int age;
-      double gpa;
-      
-    @Override
-  protected void map(WritableComparable key, HCatRecord value, 
-          org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,
-          IntWritable,HCatRecord>.Context context) 
-    throws IOException ,InterruptedException {
-        name = (String)value.get(0);
-        age = (Integer)value.get(1);
-        gpa = (Double)value.get(2);
-        gpa = Math.floor(gpa) + 0.1;
-        
-        HCatRecord record = new DefaultHCatRecord(3);
-        record.set(0, name);
-        record.set(1, age);
-        record.set(2, gpa);
-        
-        context.write(null, record);
 
+    public static class Map
+        extends Mapper<WritableComparable, HCatRecord, IntWritable, HCatRecord> {
+
+        String name;
+        int age;
+        double gpa;
+
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord,
+                               IntWritable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            name = (String) value.get(0);
+            age = (Integer) value.get(1);
+            gpa = (Double) value.get(2);
+            gpa = Math.floor(gpa) + 0.1;
+
+            HCatRecord record = new DefaultHCatRecord(3);
+            record.set(0, name);
+            record.set(1, age);
+            record.set(2, gpa);
+
+            context.write(null, record);
+
+        }
     }
-  }
-  
-   public int run(String[] args) throws Exception {
-    Configuration conf = getConf();
-    args = new GenericOptionsParser(conf, args).getRemainingArgs();
 
-    String serverUri = args[0];
-    String tableName = args[1];
-    String outputDir = args[2];
-    String dbName = null;
-    
-    String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-    if(principalID != null)
-    conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
-    Job job = new Job(conf, "ReadRC");
-    HCatInputFormat.setInput(job, InputJobInfo.create(
-    		dbName, tableName, null));
-    // initialize HCatOutputFormat
-    
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
-    job.setJarByClass(ReadRC.class);
-    job.setMapperClass(Map.class);
-    job.setOutputKeyClass(IntWritable.class);
-    job.setOutputValueClass(HCatRecord.class);
-    job.setNumReduceTasks(0);
-    FileOutputFormat.setOutputPath(job, new Path(outputDir));
-    return (job.waitForCompletion(true) ? 0 : 1);
-  }
-   
-   public static void main(String[] args) throws Exception {
-       int exitCode = ToolRunner.run(new ReadRC(), args);
-       System.exit(exitCode);
-   }
+    public int run(String[] args) throws Exception {
+        Configuration conf = getConf();
+        args = new GenericOptionsParser(conf, args).getRemainingArgs();
+
+        String serverUri = args[0];
+        String tableName = args[1];
+        String outputDir = args[2];
+        String dbName = null;
+
+        String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+        if (principalID != null)
+            conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+        Job job = new Job(conf, "ReadRC");
+        HCatInputFormat.setInput(job, InputJobInfo.create(
+            dbName, tableName, null));
+        // initialize HCatOutputFormat
+
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
+        job.setJarByClass(ReadRC.class);
+        job.setMapperClass(Map.class);
+        job.setOutputKeyClass(IntWritable.class);
+        job.setOutputValueClass(HCatRecord.class);
+        job.setNumReduceTasks(0);
+        FileOutputFormat.setOutputPath(job, new Path(outputDir));
+        return (job.waitForCompletion(true) ? 0 : 1);
+    }
+
+    public static void main(String[] args) throws Exception {
+        int exitCode = ToolRunner.run(new ReadRC(), args);
+        System.exit(exitCode);
+    }
 }
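
ReadRC, like the other read-side utilities below, wires HCatInputFormat into a map-only job whose records are dumped through TextOutputFormat. A minimal standalone sketch of that shared pattern follows; the class name, argument layout and org.apache.hcatalog package imports are assumptions for illustration, not code from this commit.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.WritableComparable;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;
    import org.apache.hcatalog.data.HCatRecord;
    import org.apache.hcatalog.mapreduce.HCatInputFormat;
    import org.apache.hcatalog.mapreduce.InputJobInfo;

    // Map-only read job: HCatalog table in, delimited text out (illustrative class name).
    public class HCatReadSketch extends Configured implements Tool {

        public static class DumpMapper
            extends Mapper<WritableComparable, HCatRecord, NullWritable, Text> {

            @Override
            protected void map(WritableComparable key, HCatRecord value, Context context)
                throws IOException, InterruptedException {
                // Emit the record's string form; the real tests pick columns explicitly.
                context.write(NullWritable.get(), new Text(value.toString()));
            }
        }

        public int run(String[] args) throws Exception {
            // args[0] = table name, args[1] = output dir (assumed argument layout)
            Job job = new Job(getConf(), "hcat-read-sketch");
            HCatInputFormat.setInput(job, InputJobInfo.create("default", args[0], null));
            job.setInputFormatClass(HCatInputFormat.class);
            job.setOutputFormatClass(TextOutputFormat.class);
            job.setJarByClass(HCatReadSketch.class);
            job.setMapperClass(DumpMapper.class);
            job.setOutputKeyClass(NullWritable.class);
            job.setOutputValueClass(Text.class);
            job.setNumReduceTasks(0);                  // map-only, like ReadRC and ReadText
            FileOutputFormat.setOutputPath(job, new Path(args[1]));
            return job.waitForCompletion(true) ? 0 : 1;
        }

        public static void main(String[] args) throws Exception {
            System.exit(ToolRunner.run(new HCatReadSketch(), args));
        }
    }

It would be launched the same way as the tools above, with the HCatalog jar passed via -libjars.
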
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadText.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadText.java
index 9f15c01..c3c6c72 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadText.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadText.java
@@ -43,81 +43,81 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce program
  * to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat jar>
-            The <tab|ctrla> argument controls the output delimiter
-            The hcat jar location should be specified as file://<full path to jar>
+ The <tab|ctrla> argument controls the output delimiter
+ The hcat jar location should be specified as file://<full path to jar>
  */
 public class ReadText extends Configured implements Tool {
-    
-  public static class Map
-       extends Mapper<WritableComparable, HCatRecord, IntWritable, HCatRecord>{
-      
-      byte t;
-      short si;
-      int i;
-      long b;
-      float f;
-      double d;
-      String s;
-      
-    @Override
-  protected void map(WritableComparable key, HCatRecord value, 
-          org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,
-          IntWritable,HCatRecord>.Context context) 
-    throws IOException ,InterruptedException {
-        t = (Byte)value.get(0);
-        si = (Short)value.get(1);
-        i = (Integer)value.get(2);
-        b = (Long)value.get(3);
-        f = (Float)value.get(4);
-        d = (Double)value.get(5);
-        s = (String)value.get(6);
-        
-        HCatRecord record = new DefaultHCatRecord(7);
-        record.set(0, t);
-        record.set(1, si);
-        record.set(2, i);
-        record.set(3, b);
-        record.set(4, f);
-        record.set(5, d);
-        record.set(6, s);
-        
-        context.write(null, record);
 
+    public static class Map
+        extends Mapper<WritableComparable, HCatRecord, IntWritable, HCatRecord> {
+
+        byte t;
+        short si;
+        int i;
+        long b;
+        float f;
+        double d;
+        String s;
+
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord,
+                               IntWritable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            t = (Byte) value.get(0);
+            si = (Short) value.get(1);
+            i = (Integer) value.get(2);
+            b = (Long) value.get(3);
+            f = (Float) value.get(4);
+            d = (Double) value.get(5);
+            s = (String) value.get(6);
+
+            HCatRecord record = new DefaultHCatRecord(7);
+            record.set(0, t);
+            record.set(1, si);
+            record.set(2, i);
+            record.set(3, b);
+            record.set(4, f);
+            record.set(5, d);
+            record.set(6, s);
+
+            context.write(null, record);
+
+        }
     }
-  }
-  
-   public int run(String[] args) throws Exception {
-    Configuration conf = getConf();
-    args = new GenericOptionsParser(conf, args).getRemainingArgs();
 
-    String serverUri = args[0];
-    String tableName = args[1];
-    String outputDir = args[2];
-    String dbName = null;
-    
-    String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-    if(principalID != null)
-    conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
-    Job job = new Job(conf, "ReadText");
-    HCatInputFormat.setInput(job, InputJobInfo.create(
-    		dbName, tableName, null));
-    // initialize HCatOutputFormat
-    
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
-    job.setJarByClass(ReadText.class);
-    job.setMapperClass(Map.class);
-    job.setOutputKeyClass(IntWritable.class);
-    job.setOutputValueClass(HCatRecord.class);
-    job.setNumReduceTasks(0);
-    FileOutputFormat.setOutputPath(job, new Path(outputDir));
-    return (job.waitForCompletion(true) ? 0 : 1);
-  }
-   
-   public static void main(String[] args) throws Exception {
-       int exitCode = ToolRunner.run(new ReadText(), args);
-       System.exit(exitCode);
-   }
+    public int run(String[] args) throws Exception {
+        Configuration conf = getConf();
+        args = new GenericOptionsParser(conf, args).getRemainingArgs();
+
+        String serverUri = args[0];
+        String tableName = args[1];
+        String outputDir = args[2];
+        String dbName = null;
+
+        String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+        if (principalID != null)
+            conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+        Job job = new Job(conf, "ReadText");
+        HCatInputFormat.setInput(job, InputJobInfo.create(
+            dbName, tableName, null));
+        // initialize HCatOutputFormat
+
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
+        job.setJarByClass(ReadText.class);
+        job.setMapperClass(Map.class);
+        job.setOutputKeyClass(IntWritable.class);
+        job.setOutputValueClass(HCatRecord.class);
+        job.setNumReduceTasks(0);
+        FileOutputFormat.setOutputPath(job, new Path(outputDir));
+        return (job.waitForCompletion(true) ? 0 : 1);
+    }
+
+    public static void main(String[] args) throws Exception {
+        int exitCode = ToolRunner.run(new ReadText(), args);
+        System.exit(exitCode);
+    }
 }
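
The casts in ReadText.Map double as a reference for the Java classes HCatRecord.get() hands back for the column types used in this table (the Hive types are inferred from those casts): Byte, Short, Integer, Long, Float, Double and String. A small typed-access sketch, with the column order assumed to match ReadText and the class name purely illustrative:

    import org.apache.hcatalog.data.HCatRecord;

    // Illustrative typed reads from a record whose columns are
    // (tinyint, smallint, int, bigint, float, double, string), as in ReadText.
    public final class TypedGets {

        private TypedGets() {
        }

        public static String describe(HCatRecord value) throws Exception {
            byte t = (Byte) value.get(0);     // tinyint  -> java.lang.Byte
            short si = (Short) value.get(1);  // smallint -> java.lang.Short
            int i = (Integer) value.get(2);   // int      -> java.lang.Integer
            long b = (Long) value.get(3);     // bigint   -> java.lang.Long
            float f = (Float) value.get(4);   // float    -> java.lang.Float
            double d = (Double) value.get(5); // double   -> java.lang.Double
            String s = (String) value.get(6); // string   -> java.lang.String
            return t + "\t" + si + "\t" + i + "\t" + b + "\t" + f + "\t" + d + "\t" + s;
        }
    }
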
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadWrite.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadWrite.java
index f3b9824..a158a80 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadWrite.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/ReadWrite.java
@@ -43,7 +43,7 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce
  * program to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat
  * jar> The <tab|ctrla> argument controls the output delimiter The hcat jar
  * location should be specified as file://<full path to jar>
@@ -51,7 +51,7 @@
 public class ReadWrite extends Configured implements Tool {
 
     public static class Map extends
-            Mapper<WritableComparable, HCatRecord, Text, HCatRecord> {
+        Mapper<WritableComparable, HCatRecord, Text, HCatRecord> {
 
         String name;
         int age;
@@ -59,10 +59,10 @@
 
         @Override
         protected void map(
-                WritableComparable key,
-                HCatRecord value,
-                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, Text, HCatRecord>.Context context)
-                throws IOException, InterruptedException {
+            WritableComparable key,
+            HCatRecord value,
+            org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, Text, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
             name = (String) value.get(0);
             age = (Integer) value.get(1);
             gpa = (Double) value.get(2);
@@ -81,12 +81,12 @@
         String dbName = null;
 
         String principalID = System
-                .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
         if (principalID != null)
             conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
         Job job = new Job(conf, "ReadWrite");
         HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
-                inputTableName, null));
+            inputTableName, null));
         // initialize HCatOutputFormat
 
         job.setInputFormatClass(HCatInputFormat.class);
@@ -95,10 +95,10 @@
         job.setOutputKeyClass(Text.class);
         job.setOutputValueClass(DefaultHCatRecord.class);
         HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
-                outputTableName, null));
+            outputTableName, null));
         HCatSchema s = HCatInputFormat.getTableSchema(job);
         System.err.println("INFO: output schema explicitly set for writing:"
-                + s);
+            + s);
         HCatOutputFormat.setSchema(job, s);
         job.setOutputFormatClass(HCatOutputFormat.class);
         return (job.waitForCompletion(true) ? 0 : 1);
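
ReadWrite combines both sides of the API: HCatInputFormat feeds the mapper and HCatOutputFormat writes the results back into another table, reusing the input table's schema. A condensed, self-contained sketch of that wiring, under the same package assumptions as above and with an illustrative class name and argument layout:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.WritableComparable;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hcatalog.data.DefaultHCatRecord;
    import org.apache.hcatalog.data.HCatRecord;
    import org.apache.hcatalog.data.schema.HCatSchema;
    import org.apache.hcatalog.mapreduce.HCatInputFormat;
    import org.apache.hcatalog.mapreduce.HCatOutputFormat;
    import org.apache.hcatalog.mapreduce.InputJobInfo;
    import org.apache.hcatalog.mapreduce.OutputJobInfo;

    // Map-only table-to-table copy: read with HCatInputFormat, write with HCatOutputFormat.
    public class HCatCopySketch {

        public static class CopyMapper
            extends Mapper<WritableComparable, HCatRecord, Text, HCatRecord> {

            @Override
            protected void map(WritableComparable key, HCatRecord value, Context context)
                throws IOException, InterruptedException {
                // Pass the record through unchanged; the key is not used by the table writer.
                context.write(new Text("row"), value);
            }
        }

        public static void main(String[] args) throws Exception {
            String inputTable = args[0];    // assumed argument layout for this sketch
            String outputTable = args[1];
            Job job = new Job(new Configuration(), "hcat-copy-sketch");

            // Read side: a null filter selects the whole input table.
            HCatInputFormat.setInput(job, InputJobInfo.create("default", inputTable, null));
            job.setInputFormatClass(HCatInputFormat.class);

            // Write side: a null partition map targets an unpartitioned table.
            HCatOutputFormat.setOutput(job, OutputJobInfo.create("default", outputTable, null));
            HCatSchema schema = HCatInputFormat.getTableSchema(job);
            HCatOutputFormat.setSchema(job, schema);   // reuse the input table's schema
            job.setOutputFormatClass(HCatOutputFormat.class);

            job.setJarByClass(HCatCopySketch.class);
            job.setMapperClass(CopyMapper.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(DefaultHCatRecord.class);
            job.setNumReduceTasks(0);
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }
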
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SimpleRead.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SimpleRead.java
index e911451..ebcdd3a 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SimpleRead.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SimpleRead.java
@@ -43,65 +43,65 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce program
  * to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat jar>
-            The <tab|ctrla> argument controls the output delimiter
-            The hcat jar location should be specified as file://<full path to jar>
+ The <tab|ctrla> argument controls the output delimiter
+ The hcat jar location should be specified as file://<full path to jar>
  */
 public class SimpleRead extends Configured implements Tool {
 
     private static final String TABLE_NAME = "studenttab10k";
     private static final String TAB = "\t";
-    
-  public static class Map
-       extends Mapper<WritableComparable, HCatRecord, Text, IntWritable>{
-      
-      String name;
-      int age;
-      double gpa;
-      
-    @Override
-  protected void map(WritableComparable key, HCatRecord value, 
-          org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,
-          Text,IntWritable>.Context context) 
-    throws IOException ,InterruptedException {
-        name = (String) value.get(0);
-        age = (Integer) value.get(1);
-        gpa = (Double) value.get(2);
-        context.write(new Text(name), new IntWritable(age));
 
+    public static class Map
+        extends Mapper<WritableComparable, HCatRecord, Text, IntWritable> {
+
+        String name;
+        int age;
+        double gpa;
+
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord,
+                               Text, IntWritable>.Context context)
+            throws IOException, InterruptedException {
+            name = (String) value.get(0);
+            age = (Integer) value.get(1);
+            gpa = (Double) value.get(2);
+            context.write(new Text(name), new IntWritable(age));
+
+        }
     }
-  }
-  
-   public int run(String[] args) throws Exception {
-    Configuration conf = getConf();
-    args = new GenericOptionsParser(conf, args).getRemainingArgs();
 
-    String serverUri = args[0];
-    String tableName = args[1];
-    String outputDir = args[2];
-    String dbName = null;
-    
-    String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-    if(principalID != null)
-    conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
-    Job job = new Job(conf, "SimpleRead");
-    HCatInputFormat.setInput(job, InputJobInfo.create(
-    		dbName, tableName, null));
-    // initialize HCatOutputFormat
-    
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
-    job.setJarByClass(SimpleRead.class);
-    job.setMapperClass(Map.class);
-    job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(IntWritable.class);
-    FileOutputFormat.setOutputPath(job, new Path(outputDir));
-    return (job.waitForCompletion(true) ? 0 : 1);
-  }
-   
-   public static void main(String[] args) throws Exception {
-       int exitCode = ToolRunner.run(new SimpleRead(), args);
-       System.exit(exitCode);
-   }
+    public int run(String[] args) throws Exception {
+        Configuration conf = getConf();
+        args = new GenericOptionsParser(conf, args).getRemainingArgs();
+
+        String serverUri = args[0];
+        String tableName = args[1];
+        String outputDir = args[2];
+        String dbName = null;
+
+        String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+        if (principalID != null)
+            conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+        Job job = new Job(conf, "SimpleRead");
+        HCatInputFormat.setInput(job, InputJobInfo.create(
+            dbName, tableName, null));
+        // initialize HCatOutputFormat
+
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
+        job.setJarByClass(SimpleRead.class);
+        job.setMapperClass(Map.class);
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+        FileOutputFormat.setOutputPath(job, new Path(outputDir));
+        return (job.waitForCompletion(true) ? 0 : 1);
+    }
+
+    public static void main(String[] args) throws Exception {
+        int exitCode = ToolRunner.run(new SimpleRead(), args);
+        System.exit(exitCode);
+    }
 }
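
Each driver in these utilities repeats the same three-line check that forwards the metastore principal from a client-side JVM system property into the job configuration before the Job is built. As a sketch only, that check could be lifted into a helper; the class and method names here are hypothetical, and the import path for HCatConstants is assumed to match the rest of these utilities.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hcatalog.common.HCatConstants;

    // Hypothetical helper mirroring the principal passthrough repeated in every driver above.
    public final class MetastorePrincipal {

        private MetastorePrincipal() {
        }

        // Copies the metastore principal from a client-side JVM system property into the
        // job configuration, but only when it was supplied.
        public static void propagate(Configuration conf) {
            String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
            if (principalID != null) {
                conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
            }
        }
    }
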
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreComplex.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreComplex.java
index da344fb..b78bf83 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreComplex.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreComplex.java
@@ -43,93 +43,93 @@
  * table and writes to "complex_nopart_empty_initially" table. It reads data from complex which
  * is an unpartitioned table and stores the data as-is into complex_empty_initially table
  * (which is also unpartitioned)
- * 
+ *
  * Usage: hadoop jar testudf.jar storecomplex <serveruri> <-libjars hive-hcat jar>  
-        The hcat jar location should be specified as file://<full path to jar>
+ The hcat jar location should be specified as file://<full path to jar>
  */
 public class StoreComplex {
 
     private static final String COMPLEX_TABLE_NAME = "complex";
     private static final String COMPLEX_NOPART_EMPTY_INITIALLY_TABLE_NAME = "complex_nopart_empty_initially";
-    
-    
-  public static class ComplexMapper 
-       extends Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>{
-      
-    @Override
-  protected void map(WritableComparable key, HCatRecord value, 
-          org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,
-          WritableComparable,HCatRecord>.Context context) 
-    throws IOException ,InterruptedException {
-        // just write out the value as-is
-        context.write(new IntWritable(0), value);
 
-    }
-  }
-  
-  
-   public static void main(String[] args) throws Exception {
-    Configuration conf = new Configuration();
-    args = new GenericOptionsParser(conf, args).getRemainingArgs();
-    String[] otherArgs = new String[1];
-    int j = 0;
-    for(int i = 0; i < args.length; i++) {
-        if(args[i].equals("-libjars")) {
-            // generic options parser doesn't seem to work!
-            conf.set("tmpjars", args[i+1]);
-            i = i+1; // skip it , the for loop will skip its value                
-        } else {
-            otherArgs[j++] = args[i];
+
+    public static class ComplexMapper
+        extends Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord,
+                               WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            // just write out the value as-is
+            context.write(new IntWritable(0), value);
+
         }
     }
-    if (otherArgs.length != 1) {
-      usage();
-    }
-    String serverUri = otherArgs[0];
-    String tableName = COMPLEX_TABLE_NAME;
-    String dbName = "default";
-    Map<String, String> outputPartitionKvps = new HashMap<String, String>();
-    String outputTableName = null;
-    outputTableName = COMPLEX_NOPART_EMPTY_INITIALLY_TABLE_NAME;
-    // test with null or empty randomly
-    if(new Random().nextInt(2) == 0) {
-        System.err.println("INFO: output partition keys set to null for writing");
-        outputPartitionKvps = null;
-    }
-    String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-    if(principalID != null)
-    conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
-    Job job = new Job(conf, "storecomplex");
-    // initialize HCatInputFormat
 
-    HCatInputFormat.setInput(job, InputJobInfo.create(
-    		dbName, tableName, null));
-    // initialize HCatOutputFormat
-    HCatOutputFormat.setOutput(job, OutputJobInfo.create(
+
+    public static void main(String[] args) throws Exception {
+        Configuration conf = new Configuration();
+        args = new GenericOptionsParser(conf, args).getRemainingArgs();
+        String[] otherArgs = new String[1];
+        int j = 0;
+        for (int i = 0; i < args.length; i++) {
+            if (args[i].equals("-libjars")) {
+                // generic options parser doesn't seem to work!
+                conf.set("tmpjars", args[i + 1]);
+                i = i + 1; // skip it; the for loop will skip its value
+            } else {
+                otherArgs[j++] = args[i];
+            }
+        }
+        if (otherArgs.length != 1) {
+            usage();
+        }
+        String serverUri = otherArgs[0];
+        String tableName = COMPLEX_TABLE_NAME;
+        String dbName = "default";
+        Map<String, String> outputPartitionKvps = new HashMap<String, String>();
+        String outputTableName = null;
+        outputTableName = COMPLEX_NOPART_EMPTY_INITIALLY_TABLE_NAME;
+        // test with null or empty randomly
+        if (new Random().nextInt(2) == 0) {
+            System.err.println("INFO: output partition keys set to null for writing");
+            outputPartitionKvps = null;
+        }
+        String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+        if (principalID != null)
+            conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+        Job job = new Job(conf, "storecomplex");
+        // initialize HCatInputFormat
+
+        HCatInputFormat.setInput(job, InputJobInfo.create(
+            dbName, tableName, null));
+        // initialize HCatOutputFormat
+        HCatOutputFormat.setOutput(job, OutputJobInfo.create(
             dbName, outputTableName, outputPartitionKvps));
-    
-    
-    HCatSchema s = HCatInputFormat.getTableSchema(job);
-    HCatOutputFormat.setSchema(job, s);
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(HCatOutputFormat.class);
-    job.setJarByClass(StoreComplex.class);
-    job.setMapperClass(ComplexMapper.class);
-    job.setOutputKeyClass(IntWritable.class);
-    job.setOutputValueClass(DefaultHCatRecord.class);
-    System.exit(job.waitForCompletion(true) ? 0 : 1);
-  }
+
+
+        HCatSchema s = HCatInputFormat.getTableSchema(job);
+        HCatOutputFormat.setSchema(job, s);
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(HCatOutputFormat.class);
+        job.setJarByClass(StoreComplex.class);
+        job.setMapperClass(ComplexMapper.class);
+        job.setOutputKeyClass(IntWritable.class);
+        job.setOutputValueClass(DefaultHCatRecord.class);
+        System.exit(job.waitForCompletion(true) ? 0 : 1);
+    }
 
 
     /**
-     * 
+     *
      */
     private static void usage() {
         System.err.println("Usage: hadoop jar testudf.jar storecomplex <serveruri> <-libjars hive-hcat jar>\n" +
-        "The hcat jar location should be specified as file://<full path to jar>\n");
+            "The hcat jar location should be specified as file://<full path to jar>\n");
         System.exit(2);
-        
+
     }
-   
+
 
 }
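
StoreComplex and the other Store* programs parse -libjars by hand, copying the jar list into the tmpjars configuration key (the key -libjars normally populates) and collecting the remaining arguments themselves. A hypothetical extraction of that loop, generalized to any argument count:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;

    // Hypothetical extraction of the manual -libjars handling used in the Store* drivers.
    public final class LibJarsArgs {

        private LibJarsArgs() {
        }

        // Returns the arguments other than -libjars; the jar list itself is copied into
        // the "tmpjars" configuration key so the extra jars are shipped with the job.
        public static String[] strip(Configuration conf, String[] args) {
            List<String> remaining = new ArrayList<String>();
            for (int i = 0; i < args.length; i++) {
                if (args[i].equals("-libjars") && i + 1 < args.length) {
                    conf.set("tmpjars", args[i + 1]);
                    i++;                  // skip the jar list value
                } else {
                    remaining.add(args[i]);
                }
            }
            return remaining.toArray(new String[remaining.size()]);
        }
    }

A side note on the original loop: because otherArgs is a fixed-size array in the code above, the otherArgs.length checks that follow it can never fail; collecting into a list, as in this sketch, keeps the argument count meaningful.
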
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreDemo.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreDemo.java
index d35ceac..e1c3b45 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreDemo.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreDemo.java
@@ -48,104 +48,105 @@
  * 'numbers_nopart_pig_empty_initially' (unpartitioned) table with the tinyint
  * and smallint columns in "numbers" being stored as "int" (since pig cannot handle
  * tinyint and smallint)
- * 
+ *
  * Usage: hadoop jar storenumbers <serveruri> <part|nopart|nopart_pig> <-libjars hive-hcat jar>
-        If the second argument is "part" data is written to datestamp = '2010101' partition of the numbers_part_empty_initially table.
-        If the second argument is "nopart", data is written to the unpartitioned numbers_nopart_empty_initially table.
-        If the second argument is "nopart_pig", data is written to the unpartitioned numbers_nopart_pig_empty_initially table.
-        The hcat jar location should be specified as file://<full path to jar>
+ If the second argument is "part" data is written to datestamp = '2010101' partition of the numbers_part_empty_initially table.
+ If the second argument is "nopart", data is written to the unpartitioned numbers_nopart_empty_initially table.
+ If the second argument is "nopart_pig", data is written to the unpartitioned numbers_nopart_pig_empty_initially table.
+ The hcat jar location should be specified as file://<full path to jar>
  */
 public class StoreDemo {
 
     private static final String NUMBERS_PARTITIONED_TABLE_NAME = "demo_partitioned";
     private static final String NUMBERS_TABLE_NAME = "demo";
-    
-  public static class SumMapper 
-       extends Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>{
-      
-    
-      Integer intnum;
-     
-      Double doublenum;
-    @Override
-  protected void map(WritableComparable key, HCatRecord value, 
-          org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,
-          WritableComparable,HCatRecord>.Context context) 
-    throws IOException ,InterruptedException {
-        intnum = ((Integer)value.get(0));
-        value.set(0, intnum + 20);
-        doublenum = ((Double) value.get(1));
-        value.set(1, (Double) (doublenum + 20));
-        context.write(new IntWritable(0), value);
 
-    }
-  }
-  
-  
-   public static void main(String[] args) throws Exception {
-    Configuration conf = new Configuration();
-    args = new GenericOptionsParser(conf, args).getRemainingArgs();
-    String[] otherArgs = new String[1];
-    int j = 0;
-    for(int i = 0; i < args.length; i++) {
-        if(args[i].equals("-libjars")) {
-            // generic options parser doesn't seem to work!
-            conf.set("tmpjars", args[i+1]);
-            i = i+1; // skip it , the for loop will skip its value                
-        } else {
-            otherArgs[j++] = args[i];
+    public static class SumMapper
+        extends Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+
+
+        Integer intnum;
+
+        Double doublenum;
+
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord,
+                               WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            intnum = ((Integer) value.get(0));
+            value.set(0, intnum + 20);
+            doublenum = ((Double) value.get(1));
+            value.set(1, (Double) (doublenum + 20));
+            context.write(new IntWritable(0), value);
+
         }
     }
-    if (otherArgs.length != 1) {
-      usage();
-    }
-    String serverUri = otherArgs[0];
-    
-    String tableName = NUMBERS_TABLE_NAME;
-    String dbName = "default";
-    Map<String, String> outputPartitionKvps = new HashMap<String, String>();
-    String outputTableName = NUMBERS_PARTITIONED_TABLE_NAME;
-    outputPartitionKvps.put("datestamp", "20100102");
-    
-    String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-    if(principalID != null)
-    conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
-    Job job = new Job(conf, "storedemo");
-    // initialize HCatInputFormat
-    HCatInputFormat.setInput(job, InputJobInfo.create(
-    		dbName, tableName, null));
-    // initialize HCatOutputFormat
-    HCatOutputFormat.setOutput(job, OutputJobInfo.create(
+
+
+    public static void main(String[] args) throws Exception {
+        Configuration conf = new Configuration();
+        args = new GenericOptionsParser(conf, args).getRemainingArgs();
+        String[] otherArgs = new String[1];
+        int j = 0;
+        for (int i = 0; i < args.length; i++) {
+            if (args[i].equals("-libjars")) {
+                // generic options parser doesn't seem to work!
+                conf.set("tmpjars", args[i + 1]);
+                i = i + 1; // skip it; the for loop will skip its value
+            } else {
+                otherArgs[j++] = args[i];
+            }
+        }
+        if (otherArgs.length != 1) {
+            usage();
+        }
+        String serverUri = otherArgs[0];
+
+        String tableName = NUMBERS_TABLE_NAME;
+        String dbName = "default";
+        Map<String, String> outputPartitionKvps = new HashMap<String, String>();
+        String outputTableName = NUMBERS_PARTITIONED_TABLE_NAME;
+        outputPartitionKvps.put("datestamp", "20100102");
+
+        String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+        if (principalID != null)
+            conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+        Job job = new Job(conf, "storedemo");
+        // initialize HCatInputFormat
+        HCatInputFormat.setInput(job, InputJobInfo.create(
+            dbName, tableName, null));
+        // initialize HCatOutputFormat
+        HCatOutputFormat.setOutput(job, OutputJobInfo.create(
             dbName, outputTableName, outputPartitionKvps));
-    // test with and without specifying schema randomly
-    HCatSchema s = HCatInputFormat.getTableSchema(job);
-    System.err.println("INFO: output schema explicitly set for writing:" + s);
-    HCatOutputFormat.setSchema(job, s);
-    
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(HCatOutputFormat.class);
-    job.setJarByClass(StoreDemo.class);
-    job.setMapperClass(SumMapper.class);
-    job.setOutputKeyClass(IntWritable.class);
-    job.setNumReduceTasks(0);
-    job.setOutputValueClass(DefaultHCatRecord.class);
-    System.exit(job.waitForCompletion(true) ? 0 : 1);
-  }
+        // test with and without specifying schema randomly
+        HCatSchema s = HCatInputFormat.getTableSchema(job);
+        System.err.println("INFO: output schema explicitly set for writing:" + s);
+        HCatOutputFormat.setSchema(job, s);
+
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(HCatOutputFormat.class);
+        job.setJarByClass(StoreDemo.class);
+        job.setMapperClass(SumMapper.class);
+        job.setOutputKeyClass(IntWritable.class);
+        job.setNumReduceTasks(0);
+        job.setOutputValueClass(DefaultHCatRecord.class);
+        System.exit(job.waitForCompletion(true) ? 0 : 1);
+    }
 
 
     /**
-     * 
+     *
      */
     private static void usage() {
         System.err.println("Usage: hadoop jar storenumbers <serveruri> <part|nopart|nopart_pig> <-libjars hive-hcat jar>\n" +
-                "\tIf the second argument is \"part\" data is written to datestamp = '2010101' partition of " +
-                "the numbers_part_empty_initially table.\n\tIf the second argument is \"nopart\", data is written to " +
-                "the unpartitioned numbers_nopart_empty_initially table.\n\tIf the second argument is \"nopart_pig\", " +
-                "data is written to the unpartitioned numbers_nopart_pig_empty_initially table.\nt" +
-        "The hcat jar location should be specified as file://<full path to jar>\n");
-    System.exit(2);
-        
+            "\tIf the second argument is \"part\" data is written to datestamp = '2010101' partition of " +
+            "the numbers_part_empty_initially table.\n\tIf the second argument is \"nopart\", data is written to " +
+            "the unpartitioned numbers_nopart_empty_initially table.\n\tIf the second argument is \"nopart_pig\", " +
+            "data is written to the unpartitioned numbers_nopart_pig_empty_initially table.\n\t" +
+            "The hcat jar location should be specified as file://<full path to jar>\n");
+        System.exit(2);
+
     }
-   
+
 
 }
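
StoreDemo writes every record into one partition of demo_partitioned by passing an explicit key/value map to OutputJobInfo.create and reusing the input table's schema. A hypothetical helper isolating that output-side setup, assuming the input side has already been configured with HCatInputFormat.setInput:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hcatalog.data.schema.HCatSchema;
    import org.apache.hcatalog.mapreduce.HCatInputFormat;
    import org.apache.hcatalog.mapreduce.HCatOutputFormat;
    import org.apache.hcatalog.mapreduce.OutputJobInfo;

    // Hypothetical helper isolating the partitioned-output setup from StoreDemo.main().
    public final class PartitionedOutput {

        private PartitionedOutput() {
        }

        // Directs every record written by the job into one partition of the target table,
        // reusing the schema of the table the job reads from.
        public static void configure(Job job, String dbName, String tableName,
                                     String partitionKey, String partitionValue) throws Exception {
            Map<String, String> partitionKvps = new HashMap<String, String>();
            partitionKvps.put(partitionKey, partitionValue);
            HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName, tableName, partitionKvps));

            HCatSchema schema = HCatInputFormat.getTableSchema(job);
            HCatOutputFormat.setSchema(job, schema);
            job.setOutputFormatClass(HCatOutputFormat.class);
        }
    }

StoreDemo's setup above corresponds to configure(job, "default", "demo_partitioned", "datestamp", "20100102") placed after its HCatInputFormat.setInput call.
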
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreNumbers.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreNumbers.java
index 5c0a93d..2c906b7 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreNumbers.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreNumbers.java
@@ -52,12 +52,12 @@
  * 'numbers_nopart_pig_empty_initially' (unpartitioned) table with the tinyint
  * and smallint columns in "numbers" being stored as "int" (since pig cannot handle
  * tinyint and smallint)
- * 
+ *
  * Usage: hadoop jar storenumbers <serveruri> <part|nopart|nopart_pig> <-libjars hive-hcat jar>
-        If the second argument is "part" data is written to datestamp = '2010101' partition of the numbers_part_empty_initially table.
-        If the second argument is "nopart", data is written to the unpartitioned numbers_nopart_empty_initially table.
-        If the second argument is "nopart_pig", data is written to the unpartitioned numbers_nopart_pig_empty_initially table.
-        The hcat jar location should be specified as file://<full path to jar>
+ If the second argument is "part" data is written to datestamp = '2010101' partition of the numbers_part_empty_initially table.
+ If the second argument is "nopart", data is written to the unpartitioned numbers_nopart_empty_initially table.
+ If the second argument is "nopart_pig", data is written to the unpartitioned numbers_nopart_pig_empty_initially table.
+ The hcat jar location should be specified as file://<full path to jar>
  */
 public class StoreNumbers {
 
@@ -66,167 +66,167 @@
     private static final String NUMBERS_NON_PARTITIONED_TABLE_NAME = "numbers_nopart_empty_initially";
     private static final String NUMBERS_NON_PARTITIONED_PIG_TABLE_NAME = "numbers_nopart_pig_empty_initially";
     private static final String IS_PIG_NON_PART_TABLE = "is.pig.non.part.table";
-    
-  public static class SumMapper 
-       extends Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>{
-      
-      Integer intnum1000;
-      // though id is given as a Short by hcat, the map will emit it as an
-      // IntWritable so we can just sum in the reduce
-      Short id;
-      
-      // though intnum5 is handed as a Byte by hcat, the map() will emit it as
-      // an IntWritable so we can just sum in the reduce
-      Byte intnum5;
-      Integer intnum100;
-      Integer intnum;
-      Long longnum;
-      Float floatnum;
-      Double doublenum;
-    @Override
-  protected void map(WritableComparable key, HCatRecord value, 
-          org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,
-          WritableComparable,HCatRecord>.Context context) 
-    throws IOException ,InterruptedException {
-        boolean isnoPartPig = context.getConfiguration().getBoolean(IS_PIG_NON_PART_TABLE, false);
-        intnum1000 = ((Integer)value.get(0));
-        id = ((Short) value.get(1));
-        intnum5 = (((Byte)value.get(2)));
-        intnum100 = (((Integer) value.get(3)));
-        intnum = ((Integer) value.get(4));
-        longnum = ((Long) value.get(5));
-        floatnum = ((Float) value.get(6));
-        doublenum = ((Double) value.get(7));
-        HCatRecord output = new DefaultHCatRecord(8);
-        output.set(0, intnum1000 + 10);
-        if(isnoPartPig)
-        {
-            output.set(1, ((int)(id + 10)));
-        } else {
-            output.set(1, ((short)(id + 10)));
-        }
-        if(isnoPartPig) {
-            output.set(2,  (int)(intnum5 + 10));
-        } else {
-            output.set(2, (byte) (intnum5 + 10));
-        }
-        
-        output.set(3, intnum100 + 10);
-        output.set(4, intnum + 10);
-        output.set(5, (long) (longnum + 10));
-        output.set(6, (float) (floatnum + 10));
-        output.set(7, (double) (doublenum + 10));
-        for(int i = 0; i < 8; i++) {
-            System.err.println("XXX: class:" + output.get(i).getClass());
-        }
-        context.write(new IntWritable(0), output);
 
-    }
-  }
-  
-  
-   public static void main(String[] args) throws Exception {
-    Configuration conf = new Configuration();
-    args = new GenericOptionsParser(conf, args).getRemainingArgs();
-    String[] otherArgs = new String[2];
-    int j = 0;
-    for(int i = 0; i < args.length; i++) {
-        if(args[i].equals("-libjars")) {
-            // generic options parser doesn't seem to work!
-            conf.set("tmpjars", args[i+1]);
-            i = i+1; // skip it , the for loop will skip its value                
-        } else {
-            otherArgs[j++] = args[i];
-        }
-    }
-    if (otherArgs.length != 2) {
-      usage();
-    }
-    String serverUri = otherArgs[0];
-    if(otherArgs[1] == null || (
-            !otherArgs[1].equalsIgnoreCase("part") && !otherArgs[1].equalsIgnoreCase("nopart"))
-            && !otherArgs[1].equalsIgnoreCase("nopart_pig")) {
-        usage();
-    }
-    boolean writeToPartitionedTable = (otherArgs[1].equalsIgnoreCase("part"));
-    boolean writeToNonPartPigTable = (otherArgs[1].equalsIgnoreCase("nopart_pig"));
-    String tableName = NUMBERS_TABLE_NAME;
-    String dbName = "default";
-    Map<String, String> outputPartitionKvps = new HashMap<String, String>();
-    String outputTableName = null;
-    conf.set(IS_PIG_NON_PART_TABLE, "false");
-    if(writeToPartitionedTable) {
-        outputTableName = NUMBERS_PARTITIONED_TABLE_NAME;
-        outputPartitionKvps.put("datestamp", "20100101");
-    } else {
-        if(writeToNonPartPigTable) {
-            conf.set(IS_PIG_NON_PART_TABLE, "true");
-            outputTableName = NUMBERS_NON_PARTITIONED_PIG_TABLE_NAME;
-        } else {
-            outputTableName = NUMBERS_NON_PARTITIONED_TABLE_NAME;
-        }
-        // test with null or empty randomly
-        if(new Random().nextInt(2) == 0) {
-            outputPartitionKvps = null;
-        }
-    }
-    
-    String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-    if(principalID != null)
-    conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
-    Job job = new Job(conf, "storenumbers");
-    
-    // initialize HCatInputFormat
-    HCatInputFormat.setInput(job, InputJobInfo.create(
-    		dbName, tableName, null));
-    // initialize HCatOutputFormat
-    HCatOutputFormat.setOutput(job, OutputJobInfo.create(
-            dbName, outputTableName, outputPartitionKvps));
-    // test with and without specifying schema randomly
-    HCatSchema s = HCatInputFormat.getTableSchema(job);
-    if(writeToNonPartPigTable) {
-        List<HCatFieldSchema> newHfsList = new ArrayList<HCatFieldSchema>();
-        // change smallint and tinyint to int
-        for(HCatFieldSchema hfs: s.getFields()){
-            if(hfs.getTypeString().equals("smallint")) {
-                newHfsList.add(new HCatFieldSchema(hfs.getName(), 
-                        HCatFieldSchema.Type.INT, hfs.getComment()));
-            } else if(hfs.getTypeString().equals("tinyint")) {
-                newHfsList.add(new HCatFieldSchema(hfs.getName(), 
-                        HCatFieldSchema.Type.INT, hfs.getComment()));
+    public static class SumMapper
+        extends Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+
+        Integer intnum1000;
+        // though id is given as a Short by hcat, the map will emit it as an
+        // IntWritable so we can just sum in the reduce
+        Short id;
+
+        // though intnum5 is handed as a Byte by hcat, the map() will emit it as
+        // an IntWritable so we can just sum in the reduce
+        Byte intnum5;
+        Integer intnum100;
+        Integer intnum;
+        Long longnum;
+        Float floatnum;
+        Double doublenum;
+
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord,
+                               WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            boolean isnoPartPig = context.getConfiguration().getBoolean(IS_PIG_NON_PART_TABLE, false);
+            intnum1000 = ((Integer) value.get(0));
+            id = ((Short) value.get(1));
+            intnum5 = (((Byte) value.get(2)));
+            intnum100 = (((Integer) value.get(3)));
+            intnum = ((Integer) value.get(4));
+            longnum = ((Long) value.get(5));
+            floatnum = ((Float) value.get(6));
+            doublenum = ((Double) value.get(7));
+            HCatRecord output = new DefaultHCatRecord(8);
+            output.set(0, intnum1000 + 10);
+            if (isnoPartPig) {
+                output.set(1, ((int) (id + 10)));
             } else {
-                newHfsList.add(hfs);
+                output.set(1, ((short) (id + 10)));
+            }
+            if (isnoPartPig) {
+                output.set(2, (int) (intnum5 + 10));
+            } else {
+                output.set(2, (byte) (intnum5 + 10));
+            }
+
+            output.set(3, intnum100 + 10);
+            output.set(4, intnum + 10);
+            output.set(5, (long) (longnum + 10));
+            output.set(6, (float) (floatnum + 10));
+            output.set(7, (double) (doublenum + 10));
+            for (int i = 0; i < 8; i++) {
+                System.err.println("XXX: class:" + output.get(i).getClass());
+            }
+            context.write(new IntWritable(0), output);
+
+        }
+    }
+
+
+    public static void main(String[] args) throws Exception {
+        Configuration conf = new Configuration();
+        args = new GenericOptionsParser(conf, args).getRemainingArgs();
+        String[] otherArgs = new String[2];
+        int j = 0;
+        for (int i = 0; i < args.length; i++) {
+            if (args[i].equals("-libjars")) {
+                // generic options parser doesn't seem to work!
+                conf.set("tmpjars", args[i + 1]);
+                i = i + 1; // skip it; the for loop will skip its value
+            } else {
+                otherArgs[j++] = args[i];
             }
         }
-        s = new HCatSchema(newHfsList);
-    } 
-    HCatOutputFormat.setSchema(job, s);
-    
-    
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(HCatOutputFormat.class);
-    job.setJarByClass(StoreNumbers.class);
-    job.setMapperClass(SumMapper.class);
-    job.setOutputKeyClass(IntWritable.class);
-    job.setNumReduceTasks(0);
-    job.setOutputValueClass(DefaultHCatRecord.class);
-    System.exit(job.waitForCompletion(true) ? 0 : 1);
-  }
+        if (otherArgs.length != 2) {
+            usage();
+        }
+        String serverUri = otherArgs[0];
+        if (otherArgs[1] == null || (
+            !otherArgs[1].equalsIgnoreCase("part") && !otherArgs[1].equalsIgnoreCase("nopart"))
+            && !otherArgs[1].equalsIgnoreCase("nopart_pig")) {
+            usage();
+        }
+        boolean writeToPartitionedTable = (otherArgs[1].equalsIgnoreCase("part"));
+        boolean writeToNonPartPigTable = (otherArgs[1].equalsIgnoreCase("nopart_pig"));
+        String tableName = NUMBERS_TABLE_NAME;
+        String dbName = "default";
+        Map<String, String> outputPartitionKvps = new HashMap<String, String>();
+        String outputTableName = null;
+        conf.set(IS_PIG_NON_PART_TABLE, "false");
+        if (writeToPartitionedTable) {
+            outputTableName = NUMBERS_PARTITIONED_TABLE_NAME;
+            outputPartitionKvps.put("datestamp", "20100101");
+        } else {
+            if (writeToNonPartPigTable) {
+                conf.set(IS_PIG_NON_PART_TABLE, "true");
+                outputTableName = NUMBERS_NON_PARTITIONED_PIG_TABLE_NAME;
+            } else {
+                outputTableName = NUMBERS_NON_PARTITIONED_TABLE_NAME;
+            }
+            // test with null or empty randomly
+            if (new Random().nextInt(2) == 0) {
+                outputPartitionKvps = null;
+            }
+        }
+
+        String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+        if (principalID != null)
+            conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+        Job job = new Job(conf, "storenumbers");
+
+        // initialize HCatInputFormat
+        HCatInputFormat.setInput(job, InputJobInfo.create(
+            dbName, tableName, null));
+        // initialize HCatOutputFormat
+        HCatOutputFormat.setOutput(job, OutputJobInfo.create(
+            dbName, outputTableName, outputPartitionKvps));
+        // test with and without specifying schema randomly
+        HCatSchema s = HCatInputFormat.getTableSchema(job);
+        if (writeToNonPartPigTable) {
+            List<HCatFieldSchema> newHfsList = new ArrayList<HCatFieldSchema>();
+            // change smallint and tinyint to int
+            for (HCatFieldSchema hfs : s.getFields()) {
+                if (hfs.getTypeString().equals("smallint")) {
+                    newHfsList.add(new HCatFieldSchema(hfs.getName(),
+                        HCatFieldSchema.Type.INT, hfs.getComment()));
+                } else if (hfs.getTypeString().equals("tinyint")) {
+                    newHfsList.add(new HCatFieldSchema(hfs.getName(),
+                        HCatFieldSchema.Type.INT, hfs.getComment()));
+                } else {
+                    newHfsList.add(hfs);
+                }
+            }
+            s = new HCatSchema(newHfsList);
+        }
+        HCatOutputFormat.setSchema(job, s);
+
+
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(HCatOutputFormat.class);
+        job.setJarByClass(StoreNumbers.class);
+        job.setMapperClass(SumMapper.class);
+        job.setOutputKeyClass(IntWritable.class);
+        job.setNumReduceTasks(0);
+        job.setOutputValueClass(DefaultHCatRecord.class);
+        System.exit(job.waitForCompletion(true) ? 0 : 1);
+    }
 
 
     /**
-     * 
+     *
      */
     private static void usage() {
         System.err.println("Usage: hadoop jar storenumbers <serveruri> <part|nopart|nopart_pig> <-libjars hive-hcat jar>\n" +
-                "\tIf the second argument is \"part\" data is written to datestamp = '2010101' partition of " +
-                "the numbers_part_empty_initially table.\n\tIf the second argument is \"nopart\", data is written to " +
-                "the unpartitioned numbers_nopart_empty_initially table.\n\tIf the second argument is \"nopart_pig\", " +
-                "data is written to the unpartitioned numbers_nopart_pig_empty_initially table.\nt" +
-        "The hcat jar location should be specified as file://<full path to jar>\n");
-    System.exit(2);
-        
+            "\tIf the second argument is \"part\" data is written to datestamp = '2010101' partition of " +
+            "the numbers_part_empty_initially table.\n\tIf the second argument is \"nopart\", data is written to " +
+            "the unpartitioned numbers_nopart_empty_initially table.\n\tIf the second argument is \"nopart_pig\", " +
+            "data is written to the unpartitioned numbers_nopart_pig_empty_initially table.\n\t" +
+            "The hcat jar location should be specified as file://<full path to jar>\n");
+        System.exit(2);
+
     }
-   
+
 
 }
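
The one piece of StoreNumbers that is more than job wiring is the schema adjustment for the Pig-targeted table: tinyint and smallint columns are re-declared as int before the schema is handed to HCatOutputFormat.setSchema, since Pig cannot handle those types (per the class javadoc). A sketch of that adjustment as a hypothetical standalone helper, with package imports assumed to match these utilities:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hcatalog.data.schema.HCatFieldSchema;
    import org.apache.hcatalog.data.schema.HCatSchema;

    // Hypothetical helper mirroring the smallint/tinyint widening in StoreNumbers.main().
    public final class PigFriendlySchema {

        private PigFriendlySchema() {
        }

        // Returns a copy of the schema with tinyint and smallint columns widened to int.
        public static HCatSchema widen(HCatSchema schema) throws Exception {
            List<HCatFieldSchema> fields = new ArrayList<HCatFieldSchema>();
            for (HCatFieldSchema hfs : schema.getFields()) {
                String type = hfs.getTypeString();
                if (type.equals("smallint") || type.equals("tinyint")) {
                    fields.add(new HCatFieldSchema(hfs.getName(),
                        HCatFieldSchema.Type.INT, hfs.getComment()));
                } else {
                    fields.add(hfs);
                }
            }
            return new HCatSchema(fields);
        }
    }

StoreNumbers inlines the same loop just before its HCatOutputFormat.setSchema(job, s) call.
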
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SumNumbers.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SumNumbers.java
index a001493..84b5e6d 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SumNumbers.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SumNumbers.java
@@ -47,211 +47,211 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce program
  * to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat jar>
-            The <tab|ctrla> argument controls the output delimiter
-            The hcat jar location should be specified as file://<full path to jar>
+ The <tab|ctrla> argument controls the output delimiter
+ The hcat jar location should be specified as file://<full path to jar>
  */
 public class SumNumbers {
 
     private static final String NUMBERS_TABLE_NAME = "numbers";
     private static final String TAB = "\t";
-    
-  public static class SumMapper 
-       extends Mapper<WritableComparable, HCatRecord, IntWritable, SumNumbers.ArrayWritable>{
-      
-      IntWritable intnum1000;
-      // though id is given as a Short by hcat, the map will emit it as an
-      // IntWritable so we can just sum in the reduce
-      IntWritable id;
-      
-      // though intnum5 is handed as a Byte by hcat, the map() will emit it as
-      // an IntWritable so we can just sum in the reduce
-      IntWritable intnum5;
-      IntWritable intnum100;
-      IntWritable intnum;
-      LongWritable longnum;
-      FloatWritable floatnum;
-      DoubleWritable doublenum;
-    @Override
-  protected void map(WritableComparable key, HCatRecord value, 
-          org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,
-          IntWritable,SumNumbers.ArrayWritable>.Context context) 
-    throws IOException ,InterruptedException {
-        intnum1000 = new IntWritable((Integer)value.get(0));
-        id = new IntWritable((Short) value.get(1));
-        intnum5 = new IntWritable(((Byte)value.get(2)));
-        intnum100 = new IntWritable(((Integer) value.get(3)));
-        intnum = new IntWritable((Integer) value.get(4));
-        longnum = new LongWritable((Long) value.get(5));
-        floatnum = new FloatWritable((Float) value.get(6));
-        doublenum = new DoubleWritable((Double) value.get(7));
-        SumNumbers.ArrayWritable outputValue = new SumNumbers.ArrayWritable(id, 
+
+    public static class SumMapper
+        extends Mapper<WritableComparable, HCatRecord, IntWritable, SumNumbers.ArrayWritable> {
+
+        IntWritable intnum1000;
+        // though id is given as a Short by hcat, the map will emit it as an
+        // IntWritable so we can just sum in the reduce
+        IntWritable id;
+
+        // though intnum5 is handed as a Byte by hcat, the map() will emit it as
+        // an IntWritable so we can just sum in the reduce
+        IntWritable intnum5;
+        IntWritable intnum100;
+        IntWritable intnum;
+        LongWritable longnum;
+        FloatWritable floatnum;
+        DoubleWritable doublenum;
+
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord,
+                               IntWritable, SumNumbers.ArrayWritable>.Context context)
+            throws IOException, InterruptedException {
+            intnum1000 = new IntWritable((Integer) value.get(0));
+            id = new IntWritable((Short) value.get(1));
+            intnum5 = new IntWritable(((Byte) value.get(2)));
+            intnum100 = new IntWritable(((Integer) value.get(3)));
+            intnum = new IntWritable((Integer) value.get(4));
+            longnum = new LongWritable((Long) value.get(5));
+            floatnum = new FloatWritable((Float) value.get(6));
+            doublenum = new DoubleWritable((Double) value.get(7));
+            SumNumbers.ArrayWritable outputValue = new SumNumbers.ArrayWritable(id,
                 intnum5, intnum100, intnum, longnum, floatnum, doublenum);
-        context.write(intnum1000, outputValue);
+            context.write(intnum1000, outputValue);
 
-    }
-  }
-  
-  public static class SumReducer extends Reducer<IntWritable, SumNumbers.ArrayWritable, 
-  LongWritable, Text> {
-      
-      
-    LongWritable dummyLong = null;
-      @Override
-    protected void reduce(IntWritable key, java.lang.Iterable<ArrayWritable> 
-      values, org.apache.hadoop.mapreduce.Reducer<IntWritable,ArrayWritable,LongWritable,Text>.Context context) 
-      throws IOException ,InterruptedException {
-          String output = key.toString() + TAB;
-          Long sumid = 0l;
-          Long sumintnum5 = 0l;
-          Long sumintnum100 = 0l;
-          Long sumintnum = 0l;
-          Long sumlongnum = 0l;
-          Float sumfloatnum = 0.0f;
-          Double sumdoublenum = 0.0;
-          for (ArrayWritable value : values) {
-            sumid += value.id.get();
-            sumintnum5 += value.intnum5.get();
-            sumintnum100 += value.intnum100.get();
-            sumintnum += value.intnum.get();
-            sumlongnum += value.longnum.get();
-            sumfloatnum += value.floatnum.get();
-            sumdoublenum += value.doublenum.get();
-        }
-          output += sumid + TAB;
-          output += sumintnum5 + TAB;
-          output += sumintnum100 + TAB;
-          output += sumintnum + TAB;
-          output += sumlongnum + TAB;
-          output += sumfloatnum + TAB;
-          output += sumdoublenum + TAB;
-          context.write(dummyLong, new Text(output));
-      }
-  }
-  
-   public static void main(String[] args) throws Exception {
-    Configuration conf = new Configuration();
-    args = new GenericOptionsParser(conf, args).getRemainingArgs();
-    String[] otherArgs = new String[4];
-    int j = 0;
-    for(int i = 0; i < args.length; i++) {
-        if(args[i].equals("-libjars")) {
-            // generic options parser doesn't seem to work!
-            conf.set("tmpjars", args[i+1]);
-            i = i+1; // skip it , the for loop will skip its value                
-        } else {
-            otherArgs[j++] = args[i];
         }
     }
-    if (otherArgs.length != 4) {
-      System.err.println("Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat jar>\n" +
-            "The <tab|ctrla> argument controls the output delimiter.\n" +
-            "The hcat jar location should be specified as file://<full path to jar>\n");
-      System.exit(2);
-    }
-    String serverUri = otherArgs[0];
-    String tableName = NUMBERS_TABLE_NAME;
-    String outputDir = otherArgs[1];
-    String dbName = "default";
-    
-    String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-    if(principalID != null)
-    conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
-    Job job = new Job(conf, "sumnumbers");
-    HCatInputFormat.setInput(job, InputJobInfo.create(
-    		dbName, tableName, null));
-    // initialize HCatOutputFormat
-    
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
-    job.setJarByClass(SumNumbers.class);
-    job.setMapperClass(SumMapper.class);
-    job.setReducerClass(SumReducer.class);
-    job.setMapOutputKeyClass(IntWritable.class);
-    job.setMapOutputValueClass(ArrayWritable.class);
-    job.setOutputKeyClass(LongWritable.class);
-    job.setOutputValueClass(Text.class);
-    FileOutputFormat.setOutputPath(job, new Path(outputDir));
-    System.exit(job.waitForCompletion(true) ? 0 : 1);
-  }
-   
-   public static class ArrayWritable implements Writable {
 
-       // though id is given as a Short by hcat, the map will emit it as an
-       // IntWritable so we can just sum in the reduce
-       IntWritable id;
-       
-       // though intnum5 is handed as a Byte by hcat, the map() will emit it as
-       // an IntWritable so we can just sum in the reduce
-       IntWritable intnum5;
-       
-       IntWritable intnum100;
-       IntWritable intnum;
-       LongWritable longnum;
-       FloatWritable floatnum;
-       DoubleWritable doublenum;
-       
-       /**
-     * 
-     */
-    public ArrayWritable() {
-        id = new IntWritable();
-        intnum5 = new IntWritable();
-        intnum100 = new IntWritable();
-        intnum = new IntWritable();
-        longnum = new LongWritable();
-        floatnum = new FloatWritable();
-        doublenum = new DoubleWritable();
-    }
-    
-    
-       
-    /**
-     * @param id
-     * @param intnum5
-     * @param intnum100
-     * @param intnum
-     * @param longnum
-     * @param floatnum
-     * @param doublenum
-     */
-    public ArrayWritable(IntWritable id, IntWritable intnum5,
-            IntWritable intnum100, IntWritable intnum, LongWritable longnum,
-            FloatWritable floatnum, DoubleWritable doublenum) {
-        this.id = id;
-        this.intnum5 = intnum5;
-        this.intnum100 = intnum100;
-        this.intnum = intnum;
-        this.longnum = longnum;
-        this.floatnum = floatnum;
-        this.doublenum = doublenum;
-    }
+    public static class SumReducer extends Reducer<IntWritable, SumNumbers.ArrayWritable,
+        LongWritable, Text> {
 
 
+        LongWritable dummyLong = null;
 
-    @Override
-    public void readFields(DataInput in) throws IOException {
-        id.readFields(in);
-        intnum5.readFields(in);
-        intnum100.readFields(in);
-        intnum.readFields(in);
-        longnum.readFields(in);
-        floatnum.readFields(in);
-        doublenum.readFields(in);
+        @Override
+        protected void reduce(IntWritable key, java.lang.Iterable<ArrayWritable>
+            values, org.apache.hadoop.mapreduce.Reducer<IntWritable, ArrayWritable, LongWritable, Text>.Context context)
+            throws IOException, InterruptedException {
+            String output = key.toString() + TAB;
+            Long sumid = 0l;
+            Long sumintnum5 = 0l;
+            Long sumintnum100 = 0l;
+            Long sumintnum = 0l;
+            Long sumlongnum = 0l;
+            Float sumfloatnum = 0.0f;
+            Double sumdoublenum = 0.0;
+            for (ArrayWritable value : values) {
+                sumid += value.id.get();
+                sumintnum5 += value.intnum5.get();
+                sumintnum100 += value.intnum100.get();
+                sumintnum += value.intnum.get();
+                sumlongnum += value.longnum.get();
+                sumfloatnum += value.floatnum.get();
+                sumdoublenum += value.doublenum.get();
+            }
+            output += sumid + TAB;
+            output += sumintnum5 + TAB;
+            output += sumintnum100 + TAB;
+            output += sumintnum + TAB;
+            output += sumlongnum + TAB;
+            output += sumfloatnum + TAB;
+            output += sumdoublenum + TAB;
+            context.write(dummyLong, new Text(output));
+        }
     }
 
-    @Override
-    public void write(DataOutput out) throws IOException {
-        id.write(out);
-        intnum5.write(out);
-        intnum100.write(out);
-        intnum.write(out);
-        longnum.write(out);
-        floatnum.write(out);
-        doublenum.write(out);
-        
+    public static void main(String[] args) throws Exception {
+        Configuration conf = new Configuration();
+        args = new GenericOptionsParser(conf, args).getRemainingArgs();
+        String[] otherArgs = new String[4];
+        int j = 0;
+        for (int i = 0; i < args.length; i++) {
+            if (args[i].equals("-libjars")) {
+                // generic options parser doesn't seem to work!
+                conf.set("tmpjars", args[i + 1]);
+                i = i + 1; // skip it; the for loop's increment will then skip its value
+            } else {
+                otherArgs[j++] = args[i];
+            }
+        }
+        if (otherArgs.length != 4) {
+            System.err.println("Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat jar>\n" +
+                "The <tab|ctrla> argument controls the output delimiter.\n" +
+                "The hcat jar location should be specified as file://<full path to jar>\n");
+            System.exit(2);
+        }
+        String serverUri = otherArgs[0];
+        String tableName = NUMBERS_TABLE_NAME;
+        String outputDir = otherArgs[1];
+        String dbName = "default";
+
+        String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+        if (principalID != null)
+            conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+        Job job = new Job(conf, "sumnumbers");
+        HCatInputFormat.setInput(job, InputJobInfo.create(
+            dbName, tableName, null));
+        // initialize HCatOutputFormat
+
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
+        job.setJarByClass(SumNumbers.class);
+        job.setMapperClass(SumMapper.class);
+        job.setReducerClass(SumReducer.class);
+        job.setMapOutputKeyClass(IntWritable.class);
+        job.setMapOutputValueClass(ArrayWritable.class);
+        job.setOutputKeyClass(LongWritable.class);
+        job.setOutputValueClass(Text.class);
+        FileOutputFormat.setOutputPath(job, new Path(outputDir));
+        System.exit(job.waitForCompletion(true) ? 0 : 1);
     }
-       
-   }
+
+    public static class ArrayWritable implements Writable {
+
+        // though id is given as a Short by hcat, the map will emit it as an
+        // IntWritable so we can just sum in the reduce
+        IntWritable id;
+
+        // though intnum5 is handed as a Byte by hcat, the map() will emit it as
+        // an IntWritable so we can just sum in the reduce
+        IntWritable intnum5;
+
+        IntWritable intnum100;
+        IntWritable intnum;
+        LongWritable longnum;
+        FloatWritable floatnum;
+        DoubleWritable doublenum;
+
+        /**
+         * No-arg constructor; initializes every field to an empty writable so
+         * that readFields() can deserialize into them.
+         */
+        public ArrayWritable() {
+            id = new IntWritable();
+            intnum5 = new IntWritable();
+            intnum100 = new IntWritable();
+            intnum = new IntWritable();
+            longnum = new LongWritable();
+            floatnum = new FloatWritable();
+            doublenum = new DoubleWritable();
+        }
+
+
+        /**
+         * @param id
+         * @param intnum5
+         * @param intnum100
+         * @param intnum
+         * @param longnum
+         * @param floatnum
+         * @param doublenum
+         */
+        public ArrayWritable(IntWritable id, IntWritable intnum5,
+                             IntWritable intnum100, IntWritable intnum, LongWritable longnum,
+                             FloatWritable floatnum, DoubleWritable doublenum) {
+            this.id = id;
+            this.intnum5 = intnum5;
+            this.intnum100 = intnum100;
+            this.intnum = intnum;
+            this.longnum = longnum;
+            this.floatnum = floatnum;
+            this.doublenum = doublenum;
+        }
+
+
+        @Override
+        public void readFields(DataInput in) throws IOException {
+            id.readFields(in);
+            intnum5.readFields(in);
+            intnum100.readFields(in);
+            intnum.readFields(in);
+            longnum.readFields(in);
+            floatnum.readFields(in);
+            doublenum.readFields(in);
+        }
+
+        @Override
+        public void write(DataOutput out) throws IOException {
+            id.write(out);
+            intnum5.write(out);
+            intnum100.write(out);
+            intnum.write(out);
+            longnum.write(out);
+            floatnum.write(out);
+            doublenum.write(out);
+
+        }
+
+    }
 }
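
A minimal round-trip sketch for the hand-rolled ArrayWritable above, assuming it is compiled in the same package as SumNumbers; the class name ArrayWritableRoundTrip and the literal field values are illustrative only:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;

public class ArrayWritableRoundTrip {
    public static void main(String[] args) throws IOException {
        // Populate the writable the same way SumMapper would before emitting it.
        SumNumbers.ArrayWritable original = new SumNumbers.ArrayWritable(
            new IntWritable(1), new IntWritable(2), new IntWritable(3),
            new IntWritable(4), new LongWritable(5L),
            new FloatWritable(6.0f), new DoubleWritable(7.0));

        // Serialize, then deserialize into a fresh instance, as the shuffle does.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        SumNumbers.ArrayWritable copy = new SumNumbers.ArrayWritable();
        copy.readFields(new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray())));
    }
}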
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/TypeDataCheck.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/TypeDataCheck.java
index 44ede23..3eeb2e4 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/TypeDataCheck.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/TypeDataCheck.java
@@ -44,138 +44,139 @@
  * objective by checking the type of the Objects representing the columns against
  * the schema provided as a cmdline arg. It achieves the second objective by
  * writing the data as Text to be compared against golden results.
- * 
+ *
  * The schema specification consists of the types as given by "describe <table>"
  * with each column's type separated from the next column's type by a '+'
- * 
+ *
  * Can be used against "numbers" and "complex" tables.
- * 
+ *
  * Usage: hadoop jar testudf.jar typedatacheck <serveruri> <tablename> 
  * <hive types of cols + delimited> <output dir> <tab|ctrla> <-libjars hive-hcat jar>
-            The <tab|ctrla> argument controls the output delimiter.
-            The hcat jar location should be specified as file://<full path to jar>
+ * The <tab|ctrla> argument controls the output delimiter.
+ * The hcat jar location should be specified as file://<full path to jar>
  */
-public class TypeDataCheck implements Tool{
+public class TypeDataCheck implements Tool {
 
-	static String SCHEMA_KEY = "schema";
-	static String DELIM = "delim";
-	private static Configuration conf = new Configuration();
+    static String SCHEMA_KEY = "schema";
+    static String DELIM = "delim";
+    private static Configuration conf = new Configuration();
 
-	public static class TypeDataCheckMapper 
-	extends Mapper<WritableComparable, HCatRecord, Long, Text>{
+    public static class TypeDataCheckMapper
+        extends Mapper<WritableComparable, HCatRecord, Long, Text> {
 
-		Long dummykey = null;
-		String[] types;
-		String delim = "\u0001";      
-		@Override
-		protected void setup(org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,Long,Text>.Context context) 
-		throws IOException ,InterruptedException {
-			String typesStr = context.getConfiguration().get(SCHEMA_KEY);
-			delim = context.getConfiguration().get(DELIM);
-			if(delim.equals("tab")) {
-				delim = "\t";
-			} else if (delim.equals("ctrla")) {
-				delim = "\u0001";
-			}
-			types = typesStr.split("\\+");
-			for(int i = 0; i < types.length; i++) {
-				types[i] = types[i].toLowerCase();
-			}
+        Long dummykey = null;
+        String[] types;
+        String delim = "\u0001";
+
+        @Override
+        protected void setup(org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, Long, Text>.Context context)
+            throws IOException, InterruptedException {
+            String typesStr = context.getConfiguration().get(SCHEMA_KEY);
+            delim = context.getConfiguration().get(DELIM);
+            if (delim.equals("tab")) {
+                delim = "\t";
+            } else if (delim.equals("ctrla")) {
+                delim = "\u0001";
+            }
+            types = typesStr.split("\\+");
+            for (int i = 0; i < types.length; i++) {
+                types[i] = types[i].toLowerCase();
+            }
 
 
-		}
+        }
 
-		String check(HCatRecord r) throws IOException {
-			String s = "";
-			for(int i = 0; i < r.size(); i++) {
-				s += Util.check(types[i], r.get(i));
-				if(i != r.size() - 1) {
-					s += delim;
-				}
-			}
-			return s;
-		}
+        String check(HCatRecord r) throws IOException {
+            String s = "";
+            for (int i = 0; i < r.size(); i++) {
+                s += Util.check(types[i], r.get(i));
+                if (i != r.size() - 1) {
+                    s += delim;
+                }
+            }
+            return s;
+        }
 
-		@Override
-		protected void map(WritableComparable key, HCatRecord value, 
-				org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,Long,Text>.Context context) 
-		throws IOException ,InterruptedException {
-			context.write(dummykey, new Text(check(value)));
-		}
-	}
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, Long, Text>.Context context)
+            throws IOException, InterruptedException {
+            context.write(dummykey, new Text(check(value)));
+        }
+    }
 
-	public static void main(String[] args) throws Exception {
-		TypeDataCheck self = new TypeDataCheck();
-		System.exit(ToolRunner.run(conf, self, args));
-	}
+    public static void main(String[] args) throws Exception {
+        TypeDataCheck self = new TypeDataCheck();
+        System.exit(ToolRunner.run(conf, self, args));
+    }
 
-	public int run(String[] args) {
-		try {
-			args = new GenericOptionsParser(conf, args).getRemainingArgs();
-			String[] otherArgs = new String[5];
-			int j = 0;
-			for(int i = 0; i < args.length; i++) {
-				if(args[i].equals("-libjars")) {
-                                        conf.set("tmpjars",args[i+1]);
-					i = i+1; // skip it , the for loop will skip its value                
-				} else {
-					otherArgs[j++] = args[i];
-				}
-			}
-			if (otherArgs.length !=5 ) {
-				System.err.println("Other args:" + Arrays.asList(otherArgs));
-				System.err.println("Usage: hadoop jar testudf.jar typedatacheck " +
-						"<serveruri> <tablename> <hive types of cols + delimited> " +
-						"<output dir> <tab|ctrla> <-libjars hive-hcat jar>\n" +
-						"The <tab|ctrla> argument controls the output delimiter.\n" +
-				"The hcat jar location should be specified as file://<full path to jar>\n");
-				System.err.println(" The <tab|ctrla> argument controls the output delimiter.");
-				System.exit(2);
-			}
-			String serverUri = otherArgs[0];
-			String tableName = otherArgs[1];
-			String schemaStr = otherArgs[2];
-			String outputDir = otherArgs[3];
-			String outputdelim = otherArgs[4];
-			if(!outputdelim.equals("tab") && !outputdelim.equals("ctrla")) {
-				System.err.println("ERROR: Specify 'tab' or 'ctrla' for output delimiter");
-			}
-			String dbName = "default";
+    public int run(String[] args) {
+        try {
+            args = new GenericOptionsParser(conf, args).getRemainingArgs();
+            String[] otherArgs = new String[5];
+            int j = 0;
+            for (int i = 0; i < args.length; i++) {
+                if (args[i].equals("-libjars")) {
+                    conf.set("tmpjars", args[i + 1]);
+                    i = i + 1; // skip it; the for loop's increment will then skip its value
+                } else {
+                    otherArgs[j++] = args[i];
+                }
+            }
+            if (otherArgs.length != 5) {
+                System.err.println("Other args:" + Arrays.asList(otherArgs));
+                System.err.println("Usage: hadoop jar testudf.jar typedatacheck " +
+                    "<serveruri> <tablename> <hive types of cols + delimited> " +
+                    "<output dir> <tab|ctrla> <-libjars hive-hcat jar>\n" +
+                    "The <tab|ctrla> argument controls the output delimiter.\n" +
+                    "The hcat jar location should be specified as file://<full path to jar>\n");
+                System.err.println(" The <tab|ctrla> argument controls the output delimiter.");
+                System.exit(2);
+            }
+            String serverUri = otherArgs[0];
+            String tableName = otherArgs[1];
+            String schemaStr = otherArgs[2];
+            String outputDir = otherArgs[3];
+            String outputdelim = otherArgs[4];
+            if (!outputdelim.equals("tab") && !outputdelim.equals("ctrla")) {
+                System.err.println("ERROR: Specify 'tab' or 'ctrla' for output delimiter");
+            }
+            String dbName = "default";
 
-			String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-			if(principalID != null){
-				conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);				
-			}
-			Job job = new Job(conf, "typedatacheck");
-			// initialize HCatInputFormat
-			HCatInputFormat.setInput(job, InputJobInfo.create(
-					dbName, tableName, null));
-			HCatSchema s = HCatInputFormat.getTableSchema(job);
-			job.getConfiguration().set(SCHEMA_KEY, schemaStr);
-			job.getConfiguration().set(DELIM, outputdelim);
-			job.setInputFormatClass(HCatInputFormat.class);
-			job.setOutputFormatClass(TextOutputFormat.class);
-			job.setJarByClass(TypeDataCheck.class);
-			job.setMapperClass(TypeDataCheckMapper.class);
-			job.setNumReduceTasks(0);
-			job.setOutputKeyClass(Long.class);
-			job.setOutputValueClass(Text.class);
-			FileOutputFormat.setOutputPath(job, new Path(outputDir));
-			System.exit(job.waitForCompletion(true) ? 0 : 1);
-			return 0;
-		} catch (Exception e) {
-			throw new RuntimeException(e);
-		}
-	}
+            String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            if (principalID != null) {
+                conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+            }
+            Job job = new Job(conf, "typedatacheck");
+            // initialize HCatInputFormat
+            HCatInputFormat.setInput(job, InputJobInfo.create(
+                dbName, tableName, null));
+            HCatSchema s = HCatInputFormat.getTableSchema(job);
+            job.getConfiguration().set(SCHEMA_KEY, schemaStr);
+            job.getConfiguration().set(DELIM, outputdelim);
+            job.setInputFormatClass(HCatInputFormat.class);
+            job.setOutputFormatClass(TextOutputFormat.class);
+            job.setJarByClass(TypeDataCheck.class);
+            job.setMapperClass(TypeDataCheckMapper.class);
+            job.setNumReduceTasks(0);
+            job.setOutputKeyClass(Long.class);
+            job.setOutputValueClass(Text.class);
+            FileOutputFormat.setOutputPath(job, new Path(outputDir));
+            System.exit(job.waitForCompletion(true) ? 0 : 1);
+            return 0;
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
 
-	@Override
-	public Configuration getConf() {
-		return conf;
-	}
+    @Override
+    public Configuration getConf() {
+        return conf;
+    }
 
-	@Override
-	public void setConf(Configuration conf) {
-		TypeDataCheck.conf = conf;
-	}
+    @Override
+    public void setConf(Configuration conf) {
+        TypeDataCheck.conf = conf;
+    }
 
 }
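
A minimal launcher sketch for TypeDataCheck, following the usage described in its class comment; the metastore URI, schema string, output directory, and jar path are placeholders, and the class is assumed to compile alongside TypeDataCheck:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class TypeDataCheckLauncher {
    public static void main(String[] args) throws Exception {
        // Argument order mirrors the usage string above:
        // <serveruri> <tablename> <hive types '+' delimited> <output dir> <tab|ctrla> <-libjars jar>
        String[] checkArgs = new String[]{
            "thrift://metastore-host:9083",              // serveruri (placeholder)
            "numbers",                                   // tablename
            "int+tinyint+smallint+bigint+float+double",  // types as given by "describe numbers" (placeholder)
            "/user/hcat/typedatacheck-out",              // output dir (placeholder)
            "tab",                                       // output delimiter
            "-libjars", "file:///path/to/hcatalog.jar"   // hcat jar as a file:// URL (placeholder)
        };
        System.exit(ToolRunner.run(new Configuration(),
            new TypeDataCheck(), checkArgs));
    }
}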
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/Util.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/Util.java
index 8e34116..1876069 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/Util.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/Util.java
@@ -27,8 +27,8 @@
 public class Util {
 
     static Map<String, Class<?>> typeMap = new HashMap<String, Class<?>>();
-    
-    static{
+
+    static {
         typeMap.put("tinyint", Byte.class);
         typeMap.put("smallint", Short.class);
         typeMap.put("int", Integer.class);
@@ -41,31 +41,31 @@
         typeMap.put("map<string,string>", Map.class);
         typeMap.put("array<map<string,string>>", List.class);
     }
-    
+
     public static void die(String expectedType, Object o) throws IOException {
-        throw new IOException("Expected " + expectedType + ", got " +  
-              o.getClass().getName());
+        throw new IOException("Expected " + expectedType + ", got " +
+            o.getClass().getName());
     }
-    
-    
+
+
     public static String check(String type, Object o) throws IOException {
-        if(o == null) {
+        if (o == null) {
             return "null";
         }
-        if(check(typeMap.get(type), o)) {
-            if(type.equals("map<string,string>")) {
+        if (check(typeMap.get(type), o)) {
+            if (type.equals("map<string,string>")) {
                 Map<String, String> m = (Map<String, String>) o;
                 check(m);
-            } else if(type.equals("array<map<string,string>>")) {
+            } else if (type.equals("array<map<string,string>>")) {
                 List<Map<String, String>> listOfMaps = (List<Map<String, String>>) o;
-                for(Map<String, String> m: listOfMaps) {
+                for (Map<String, String> m : listOfMaps) {
                     check(m);
                 }
-            } else if(type.equals("struct<num:int,str:string,dbl:double>")) {
+            } else if (type.equals("struct<num:int,str:string,dbl:double>")) {
                 List<Object> l = (List<Object>) o;
-                if(!check(Integer.class, l.get(0)) ||
-                        !check(String.class, l.get(1)) ||
-                                !check(Double.class, l.get(2))) {
+                if (!check(Integer.class, l.get(0)) ||
+                    !check(String.class, l.get(1)) ||
+                    !check(Double.class, l.get(2))) {
                     die("struct<num:int,str:string,dbl:double>", l);
                 }
             }
@@ -74,32 +74,32 @@
         }
         return o.toString();
     }
-    
-    /**
-   * @param m
-   * @throws IOException 
-   */
-  public static void check(Map<String, String> m) throws IOException {
-      if(m == null) {
-          return;
-      }
-      for(Entry<String, String> e: m.entrySet()) {
-          // just access key and value to ensure they are correct
-          if(!check(String.class, e.getKey())) {
-              die("String", e.getKey());
-          }
-          if(!check(String.class, e.getValue())) {
-              die("String", e.getValue());
-          }
-      }
-      
-  }
 
-  public static boolean check(Class<?> expected, Object actual) {
-        if(actual == null) {
+    /**
+     * @param m
+     * @throws IOException
+     */
+    public static void check(Map<String, String> m) throws IOException {
+        if (m == null) {
+            return;
+        }
+        for (Entry<String, String> e : m.entrySet()) {
+            // just access key and value to ensure they are correct
+            if (!check(String.class, e.getKey())) {
+                die("String", e.getKey());
+            }
+            if (!check(String.class, e.getValue())) {
+                die("String", e.getValue());
+            }
+        }
+
+    }
+
+    public static boolean check(Class<?> expected, Object actual) {
+        if (actual == null) {
             return true;
         }
         return expected.isAssignableFrom(actual.getClass());
     }
-    
+
 }
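
A small sketch of how Util.check behaves for matching, null, and mismatched values, assuming it is compiled alongside Util in org.apache.hcatalog.utils; the values are illustrative:

import java.io.IOException;

public class UtilCheckExample {
    public static void main(String[] args) throws IOException {
        // A value whose Java class matches the hive type string is rendered as text.
        System.out.println(Util.check("smallint", Short.valueOf((short) 5)));   // prints "5"

        // Nulls are rendered as the literal string "null" regardless of type.
        System.out.println(Util.check("string", null));

        // A mismatch (e.g. a String where "int" is declared) makes check() call
        // die(), which throws an IOException naming the expected type.
    }
}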
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteJson.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteJson.java
index 994feec..1426162 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteJson.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteJson.java
@@ -42,7 +42,7 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce
  * program to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat
  * jar> The <tab|ctrla> argument controls the output delimiter The hcat jar
  * location should be specified as file://<full path to jar>
@@ -50,7 +50,7 @@
 public class WriteJson extends Configured implements Tool {
 
     public static class Map extends
-            Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+        Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
 
         String s;
         Integer i;
@@ -58,19 +58,19 @@
 
         @Override
         protected void map(
-                WritableComparable key,
-                HCatRecord value,
-                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
-                throws IOException, InterruptedException {
-            s = value.get(0)==null?null:(String)value.get(0);
-            i = value.get(1)==null?null:(Integer)value.get(1);
-            d = value.get(2)==null?null:(Double)value.get(2);
-            
+            WritableComparable key,
+            HCatRecord value,
+            org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            s = value.get(0) == null ? null : (String) value.get(0);
+            i = value.get(1) == null ? null : (Integer) value.get(1);
+            d = value.get(2) == null ? null : (Double) value.get(2);
+
             HCatRecord record = new DefaultHCatRecord(5);
             record.set(0, s);
             record.set(1, i);
             record.set(2, d);
-            
+
             context.write(null, record);
 
         }
@@ -86,12 +86,12 @@
         String dbName = null;
 
         String principalID = System
-                .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
         if (principalID != null)
             conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
         Job job = new Job(conf, "WriteJson");
         HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
-                inputTableName, null));
+            inputTableName, null));
         // initialize HCatOutputFormat
 
         job.setInputFormatClass(HCatInputFormat.class);
@@ -101,10 +101,10 @@
         job.setOutputValueClass(DefaultHCatRecord.class);
         job.setNumReduceTasks(0);
         HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
-                outputTableName, null));
+            outputTableName, null));
         HCatSchema s = HCatInputFormat.getTableSchema(job);
         System.err.println("INFO: output schema explicitly set for writing:"
-                + s);
+            + s);
         HCatOutputFormat.setSchema(job, s);
         job.setOutputFormatClass(HCatOutputFormat.class);
         return (job.waitForCompletion(true) ? 0 : 1);
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteRC.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteRC.java
index c3e84d5..b4dfdfa 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteRC.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteRC.java
@@ -42,7 +42,7 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce
  * program to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat
  * jar> The <tab|ctrla> argument controls the output delimiter The hcat jar
  * location should be specified as file://<full path to jar>
@@ -50,29 +50,29 @@
 public class WriteRC extends Configured implements Tool {
 
     public static class Map extends
-            Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+        Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
 
         String name;
         Integer age;
         Double gpa;
-        
+
         @Override
         protected void map(
-                WritableComparable key,
-                HCatRecord value,
-                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
-                throws IOException, InterruptedException {
-            name = value.get(0)==null?null:(String)value.get(0);
-            age = value.get(1)==null?null:(Integer)value.get(1);
-            gpa = value.get(2)==null?null:(Double)value.get(2);
+            WritableComparable key,
+            HCatRecord value,
+            org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            name = value.get(0) == null ? null : (String) value.get(0);
+            age = value.get(1) == null ? null : (Integer) value.get(1);
+            gpa = value.get(2) == null ? null : (Double) value.get(2);
 
             if (gpa != null) gpa = Math.floor(gpa) + 0.1;
-            
+
             HCatRecord record = new DefaultHCatRecord(5);
             record.set(0, name);
             record.set(1, age);
             record.set(2, gpa);
-            
+
             context.write(null, record);
 
         }
@@ -88,12 +88,12 @@
         String dbName = null;
 
         String principalID = System
-                .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
         if (principalID != null)
             conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
         Job job = new Job(conf, "WriteRC");
         HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
-                inputTableName, null));
+            inputTableName, null));
         // initialize HCatOutputFormat
 
         job.setInputFormatClass(HCatInputFormat.class);
@@ -103,10 +103,10 @@
         job.setOutputValueClass(DefaultHCatRecord.class);
         job.setNumReduceTasks(0);
         HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
-                outputTableName, null));
+            outputTableName, null));
         HCatSchema s = HCatInputFormat.getTableSchema(job);
         System.err.println("INFO: output schema explicitly set for writing:"
-                + s);
+            + s);
         HCatOutputFormat.setSchema(job, s);
         job.setOutputFormatClass(HCatOutputFormat.class);
         return (job.waitForCompletion(true) ? 0 : 1);
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteText.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteText.java
index de47cf2..4662739 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteText.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteText.java
@@ -42,7 +42,7 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce
  * program to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat
  * jar> The <tab|ctrla> argument controls the output delimiter The hcat jar
  * location should be specified as file://<full path to jar>
@@ -50,7 +50,7 @@
 public class WriteText extends Configured implements Tool {
 
     public static class Map extends
-            Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+        Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
 
         byte t;
         short si;
@@ -62,18 +62,18 @@
 
         @Override
         protected void map(
-                WritableComparable key,
-                HCatRecord value,
-                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
-                throws IOException, InterruptedException {
-            t = (Byte)value.get(0);
-            si = (Short)value.get(1);
-            i = (Integer)value.get(2);
-            b = (Long)value.get(3);
-            f = (Float)value.get(4);
-            d = (Double)value.get(5);
-            s = (String)value.get(6);
-            
+            WritableComparable key,
+            HCatRecord value,
+            org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            t = (Byte) value.get(0);
+            si = (Short) value.get(1);
+            i = (Integer) value.get(2);
+            b = (Long) value.get(3);
+            f = (Float) value.get(4);
+            d = (Double) value.get(5);
+            s = (String) value.get(6);
+
             HCatRecord record = new DefaultHCatRecord(7);
             record.set(0, t);
             record.set(1, si);
@@ -82,7 +82,7 @@
             record.set(4, f);
             record.set(5, d);
             record.set(6, s);
-            
+
             context.write(null, record);
 
         }
@@ -98,12 +98,12 @@
         String dbName = null;
 
         String principalID = System
-                .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
         if (principalID != null)
             conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
         Job job = new Job(conf, "WriteText");
         HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
-                inputTableName, null));
+            inputTableName, null));
         // initialize HCatOutputFormat
 
         job.setInputFormatClass(HCatInputFormat.class);
@@ -113,10 +113,10 @@
         job.setOutputValueClass(DefaultHCatRecord.class);
         job.setNumReduceTasks(0);
         HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
-                outputTableName, null));
+            outputTableName, null));
         HCatSchema s = HCatInputFormat.getTableSchema(job);
         System.err.println("INFO: output schema explicitly set for writing:"
-                + s);
+            + s);
         HCatOutputFormat.setSchema(job, s);
         job.setOutputFormatClass(HCatOutputFormat.class);
         return (job.waitForCompletion(true) ? 0 : 1);
diff --git a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteTextPartitioned.java b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteTextPartitioned.java
index a2e6167..1854082 100644
--- a/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteTextPartitioned.java
+++ b/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteTextPartitioned.java
@@ -46,7 +46,7 @@
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce
  * program to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar org.apache.hcatalog.utils.HBaseReadWrite -libjars
  * &lt;hcat_jar&gt; * &lt;serveruri&gt; &lt;input_tablename&gt; &lt;output_tablename&gt; [filter]
  * If filter is given it will be provided as the partition to write to.
@@ -56,23 +56,23 @@
     static String filter = null;
 
     public static class Map extends
-            Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+        Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
 
         @Override
         protected void map(
-                WritableComparable key,
-                HCatRecord value,
-                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
-                throws IOException, InterruptedException {
-            String name = (String)value.get(0);
-            int age = (Integer)value.get(1);
-            String ds = (String)value.get(3);
-            
+            WritableComparable key,
+            HCatRecord value,
+            org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            String name = (String) value.get(0);
+            int age = (Integer) value.get(1);
+            String ds = (String) value.get(3);
+
             HCatRecord record = (filter == null ? new DefaultHCatRecord(3) : new DefaultHCatRecord(2));
             record.set(0, name);
             record.set(1, age);
             if (filter == null) record.set(2, ds);
-            
+
             context.write(null, record);
 
         }
@@ -89,12 +89,12 @@
         String dbName = null;
 
         String principalID = System
-                .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
         if (principalID != null)
             conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
         Job job = new Job(conf, "WriteTextPartitioned");
         HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
-                inputTableName, filter));
+            inputTableName, filter));
         // initialize HCatOutputFormat
 
         job.setInputFormatClass(HCatInputFormat.class);
@@ -112,7 +112,7 @@
             partitionVals.put(s[0], val);
         }
         HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
-                outputTableName, partitionVals));
+            outputTableName, partitionVals));
         HCatSchema s = HCatInputFormat.getTableSchema(job);
         // Build the schema for this table, which is slightly different than the
         // schema for the input table
diff --git a/src/test/e2e/templeton/build.xml b/src/test/e2e/templeton/build.xml
index fe74212..ed85eba 100644
--- a/src/test/e2e/templeton/build.xml
+++ b/src/test/e2e/templeton/build.xml
@@ -17,77 +17,76 @@
 
 <project name="TestHarnessTempletonTests" default="test">
 
-  <!-- Separate property name for udfs' build.xml -->
-  <property name="e2e.lib.dir" value="${basedir}/lib"/>
+    <!-- Separate property name for udfs' build.xml -->
+    <property name="e2e.lib.dir" value="${basedir}/lib"/>
 
-  <property name="test.src" value="${basedir}/tests"/>
-  <property name="driver.src" value="${basedir}/drivers"/>
-  <property name="harness.dir" value="${basedir}/../harness"/>
-  <property name="inpdir.local" value="${basedir}/inpdir/"/>
-  <property name="test.location" value="${basedir}/testWorkDir"/>
-  <property name="driver.src" value="${basedir}/drivers"/>
+    <property name="test.src" value="${basedir}/tests"/>
+    <property name="driver.src" value="${basedir}/drivers"/>
+    <property name="harness.dir" value="${basedir}/../harness"/>
+    <property name="inpdir.local" value="${basedir}/inpdir/"/>
+    <property name="test.location" value="${basedir}/testWorkDir"/>
+    <property name="driver.src" value="${basedir}/drivers"/>
 
-  <!-- Check that the necessary properties are setup -->
-  <target name="property-check">
-    <fail message="Please set the property harness.webhdfs.url to the namenode base url of the cluster"
-      unless="harness.webhdfs.url"/>
-    <fail message="Please set the property harness.templeton.url to the templeton server base url of the cluster"
-      unless="harness.templeton.url"/>
-    <fail message="Please set the property inpdir.hdfs to the test input directory on hdfs"
-      unless="inpdir.hdfs"/>
-  </target>
+    <!-- Check that the necessary properties are setup -->
+    <target name="property-check">
+        <fail message="Please set the property harness.webhdfs.url to the namenode base url of the cluster"
+              unless="harness.webhdfs.url"/>
+        <fail message="Please set the property harness.templeton.url to the templeton server base url of the cluster"
+              unless="harness.templeton.url"/>
+        <fail message="Please set the property inpdir.hdfs to the test input directory on hdfs"
+              unless="inpdir.hdfs"/>
+    </target>
 
-  <!-- Prep the test area -->
-  <target name="init-test">
-    <mkdir dir="${test.location}"/>
-  </target>
+    <!-- Prep the test area -->
+    <target name="init-test">
+        <mkdir dir="${test.location}"/>
+    </target>
 
-  <target name="test" depends="property-check, init-test" >
-    <property name="tests.to.run" value=""/>
-    <exec executable="${harness.dir}/test_harness.pl" dir="${test.location}" failonerror="true">
-      <env key="HARNESS_ROOT" value="${harness.dir}"/>
-      <env key="DRIVER_ROOT" value="${basedir}/drivers"/>
-      <env key="TH_WORKING_DIR" value="${test.location}"/>
-      <env key="TH_INPDIR_LOCAL" value="${inpdir.local}"/>
-      <env key="TH_INPDIR_HDFS" value="${inpdir.hdfs}"/>
-      <env key="TH_OUT" value="."/>
-      <env key="TH_ROOT" value="."/>
-      <env key="WEBHDFS_URL" value="${harness.webhdfs.url}"/>
-      <env key="TEMPLETON_URL" value="${harness.templeton.url}"/>
-      <env key="USER_NAME" value="${test.user.name}"/>
-      <env key="HARNESS_CONF" value="${basedir}/conf/default.conf"/>
-      <env key="SECURE_MODE" value="${secure.mode}"/>
-      <arg line="${tests.to.run}"/>
-      <arg value="${basedir}/tests/serverstatus.conf"/>
-      <arg value="${basedir}/tests/ddl.conf"/>
-      <arg value="${basedir}/tests/jobsubmission.conf"/>
-    </exec>
-  </target>
+    <target name="test" depends="property-check, init-test">
+        <property name="tests.to.run" value=""/>
+        <exec executable="${harness.dir}/test_harness.pl" dir="${test.location}" failonerror="true">
+            <env key="HARNESS_ROOT" value="${harness.dir}"/>
+            <env key="DRIVER_ROOT" value="${basedir}/drivers"/>
+            <env key="TH_WORKING_DIR" value="${test.location}"/>
+            <env key="TH_INPDIR_LOCAL" value="${inpdir.local}"/>
+            <env key="TH_INPDIR_HDFS" value="${inpdir.hdfs}"/>
+            <env key="TH_OUT" value="."/>
+            <env key="TH_ROOT" value="."/>
+            <env key="WEBHDFS_URL" value="${harness.webhdfs.url}"/>
+            <env key="TEMPLETON_URL" value="${harness.templeton.url}"/>
+            <env key="USER_NAME" value="${test.user.name}"/>
+            <env key="HARNESS_CONF" value="${basedir}/conf/default.conf"/>
+            <env key="SECURE_MODE" value="${secure.mode}"/>
+            <arg line="${tests.to.run}"/>
+            <arg value="${basedir}/tests/serverstatus.conf"/>
+            <arg value="${basedir}/tests/ddl.conf"/>
+            <arg value="${basedir}/tests/jobsubmission.conf"/>
+        </exec>
+    </target>
 
-  <target name="test-hcat-authorization" depends="property-check, init-test" >
-    <property name="tests.to.run" value=""/>
-    <exec executable="${harness.dir}/test_harness.pl" dir="${test.location}" failonerror="true">
-      <env key="HARNESS_ROOT" value="${harness.dir}"/>
-      <env key="DRIVER_ROOT" value="${basedir}/drivers"/>
-      <env key="TH_WORKING_DIR" value="${test.location}"/>
-      <env key="TH_INPDIR_LOCAL" value="${inpdir.local}"/>
-      <env key="TH_INPDIR_HDFS" value="${inpdir.hdfs}"/>
-      <env key="TH_OUT" value="."/>
-      <env key="TH_ROOT" value="."/>
-      <env key="WEBHDFS_URL" value="${harness.webhdfs.url}"/>
-      <env key="TEMPLETON_URL" value="${harness.templeton.url}"/>
-      <env key="USER_NAME" value="${test.user.name}"/>
-      <env key="GROUP_NAME" value="${test.group.name}"/>
-      <env key="GROUP_USER_NAME" value="${test.group.user.name}"/>
-      <env key="OTHER_USER_NAME" value="${test.other.user.name}"/>
-      <env key="HARNESS_CONF" value="${basedir}/conf/default.conf"/>
-      <env key="SECURE_MODE" value="${secure.mode}"/>
-      <env key="KEYTAB_DIR" value="${keytab.dir}"/>
-      <arg line="${tests.to.run}"/>
-      <arg value="${basedir}/tests/hcatperms.conf"/>
-    </exec>
-  </target>
-
+    <target name="test-hcat-authorization" depends="property-check, init-test">
+        <property name="tests.to.run" value=""/>
+        <exec executable="${harness.dir}/test_harness.pl" dir="${test.location}" failonerror="true">
+            <env key="HARNESS_ROOT" value="${harness.dir}"/>
+            <env key="DRIVER_ROOT" value="${basedir}/drivers"/>
+            <env key="TH_WORKING_DIR" value="${test.location}"/>
+            <env key="TH_INPDIR_LOCAL" value="${inpdir.local}"/>
+            <env key="TH_INPDIR_HDFS" value="${inpdir.hdfs}"/>
+            <env key="TH_OUT" value="."/>
+            <env key="TH_ROOT" value="."/>
+            <env key="WEBHDFS_URL" value="${harness.webhdfs.url}"/>
+            <env key="TEMPLETON_URL" value="${harness.templeton.url}"/>
+            <env key="USER_NAME" value="${test.user.name}"/>
+            <env key="GROUP_NAME" value="${test.group.name}"/>
+            <env key="GROUP_USER_NAME" value="${test.group.user.name}"/>
+            <env key="OTHER_USER_NAME" value="${test.other.user.name}"/>
+            <env key="HARNESS_CONF" value="${basedir}/conf/default.conf"/>
+            <env key="SECURE_MODE" value="${secure.mode}"/>
+            <env key="KEYTAB_DIR" value="${keytab.dir}"/>
+            <arg line="${tests.to.run}"/>
+            <arg value="${basedir}/tests/hcatperms.conf"/>
+        </exec>
+    </target>
 
 
 </project>
diff --git a/src/test/org/apache/hcatalog/ExitException.java b/src/test/org/apache/hcatalog/ExitException.java
index 4b56efd..5e429c5 100644
--- a/src/test/org/apache/hcatalog/ExitException.java
+++ b/src/test/org/apache/hcatalog/ExitException.java
@@ -18,20 +18,20 @@
 
 package org.apache.hcatalog;
 
-public  class ExitException extends SecurityException {
-  private static final long serialVersionUID = -1982617086752946683L;
-  private final int status;
+public class ExitException extends SecurityException {
+    private static final long serialVersionUID = -1982617086752946683L;
+    private final int status;
 
-  /**
-   * @return the status
-   */
-  public int getStatus() {
-    return status;
-  }
+    /**
+     * @return the status
+     */
+    public int getStatus() {
+        return status;
+    }
 
-  public ExitException(int status) {
+    public ExitException(int status) {
 
-    super("Raising exception, instead of System.exit(). Return code was: "+status);
-    this.status = status;
-  }
+        super("Raising exception, instead of System.exit(). Return code was: " + status);
+        this.status = status;
+    }
 }
diff --git a/src/test/org/apache/hcatalog/HcatTestUtils.java b/src/test/org/apache/hcatalog/HcatTestUtils.java
index e1f3e0e..3348339 100644
--- a/src/test/org/apache/hcatalog/HcatTestUtils.java
+++ b/src/test/org/apache/hcatalog/HcatTestUtils.java
@@ -33,74 +33,74 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/** 
+/**
  * Utility methods for tests
  */
 public class HcatTestUtils {
-  private static final Logger LOG = LoggerFactory.getLogger(HcatTestUtils.class);
+    private static final Logger LOG = LoggerFactory.getLogger(HcatTestUtils.class);
 
-  public static FsPermission perm007 = FsPermission.createImmutable((short) 0007); // -------rwx
-  public static FsPermission perm070 = FsPermission.createImmutable((short) 0070); // ----rwx---
-  public static FsPermission perm700 = FsPermission.createImmutable((short) 0700); // -rwx------
-  public static FsPermission perm755 = FsPermission.createImmutable((short) 0755); // -rwxr-xr-x
-  public static FsPermission perm777 = FsPermission.createImmutable((short) 0777); // -rwxrwxrwx
-  public static FsPermission perm300 = FsPermission.createImmutable((short) 0300); // --wx------
-  public static FsPermission perm500 = FsPermission.createImmutable((short) 0500); // -r-x------
-  public static FsPermission perm555 = FsPermission.createImmutable((short) 0555); // -r-xr-xr-x
-  
-  /** 
-   * Returns the database path.
-   */
-  public static Path getDbPath(Hive hive, Warehouse wh, String dbName) throws MetaException, HiveException {
-    return wh.getDatabasePath(hive.getDatabase(dbName)); 
-  }
-  
-  /** 
-   * Removes all databases and tables from the metastore
-   */
-  public static void cleanupHMS(Hive hive, Warehouse wh, FsPermission defaultPerm) 
-      throws HiveException, MetaException, NoSuchObjectException {
-    for (String dbName : hive.getAllDatabases()) {
-      if (dbName.equals("default")) {
-        continue;
-      }
-      try {
-        Path path = getDbPath(hive, wh, dbName);
-        FileSystem whFs = path.getFileSystem(hive.getConf());
-        whFs.setPermission(path, defaultPerm);
-      } catch(IOException ex) {
-        //ignore
-      }
-      hive.dropDatabase(dbName, true, true, true);
-    }
-    
-    //clean tables in default db
-    for (String tablename : hive.getAllTables("default")) {
-      hive.dropTable("default", tablename, true, true);
-    }
-  }
+    public static FsPermission perm007 = FsPermission.createImmutable((short) 0007); // -------rwx
+    public static FsPermission perm070 = FsPermission.createImmutable((short) 0070); // ----rwx---
+    public static FsPermission perm700 = FsPermission.createImmutable((short) 0700); // -rwx------
+    public static FsPermission perm755 = FsPermission.createImmutable((short) 0755); // -rwxr-xr-x
+    public static FsPermission perm777 = FsPermission.createImmutable((short) 0777); // -rwxrwxrwx
+    public static FsPermission perm300 = FsPermission.createImmutable((short) 0300); // --wx------
+    public static FsPermission perm500 = FsPermission.createImmutable((short) 0500); // -r-x------
+    public static FsPermission perm555 = FsPermission.createImmutable((short) 0555); // -r-xr-xr-x
 
-  public static void createTestDataFile(String filename, String[] lines) throws IOException {
-    FileWriter writer = null;
-    try {
-      File file = new File(filename);
-      file.deleteOnExit();
-      writer = new FileWriter(file);
-      for (String line : lines) {
-        writer.write(line + "\n");
-      }
-    } finally {
-      if (writer != null) {
-        writer.close();
-      }
+    /**
+     * Returns the database path.
+     */
+    public static Path getDbPath(Hive hive, Warehouse wh, String dbName) throws MetaException, HiveException {
+        return wh.getDatabasePath(hive.getDatabase(dbName));
     }
 
-  }
+    /**
+     * Removes all databases and tables from the metastore
+     */
+    public static void cleanupHMS(Hive hive, Warehouse wh, FsPermission defaultPerm)
+        throws HiveException, MetaException, NoSuchObjectException {
+        for (String dbName : hive.getAllDatabases()) {
+            if (dbName.equals("default")) {
+                continue;
+            }
+            try {
+                Path path = getDbPath(hive, wh, dbName);
+                FileSystem whFs = path.getFileSystem(hive.getConf());
+                whFs.setPermission(path, defaultPerm);
+            } catch (IOException ex) {
+                //ignore
+            }
+            hive.dropDatabase(dbName, true, true, true);
+        }
 
-  public static boolean isHadoop23() {
-      String version = org.apache.hadoop.util.VersionInfo.getVersion();
-      if (version.matches("\\b0\\.23\\..+\\b"))
-          return true;
-      return false;
-  }
+        //clean tables in default db
+        for (String tablename : hive.getAllTables("default")) {
+            hive.dropTable("default", tablename, true, true);
+        }
+    }
+
+    public static void createTestDataFile(String filename, String[] lines) throws IOException {
+        FileWriter writer = null;
+        try {
+            File file = new File(filename);
+            file.deleteOnExit();
+            writer = new FileWriter(file);
+            for (String line : lines) {
+                writer.write(line + "\n");
+            }
+        } finally {
+            if (writer != null) {
+                writer.close();
+            }
+        }
+
+    }
+
+    public static boolean isHadoop23() {
+        String version = org.apache.hadoop.util.VersionInfo.getVersion();
+        if (version.matches("\\b0\\.23\\..+\\b"))
+            return true;
+        return false;
+    }
 }
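
A brief sketch of how a test might use the helpers above, assuming it lives in the same package; the file name and rows are placeholders:

import java.io.IOException;

public class HcatTestUtilsExample {
    public static void main(String[] args) throws IOException {
        // Write a small tab-delimited fixture that a test could load into a table.
        HcatTestUtils.createTestDataFile("/tmp/hcat_example_input.txt",
            new String[]{"alice\t20\t3.5", "bob\t22\t3.9"});

        // The FsPermission constants (perm700, perm755, ...) are typically passed
        // to FileSystem.setPermission when a test needs to tighten or relax
        // access on a warehouse directory.
        System.out.println(HcatTestUtils.perm755);
    }
}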
diff --git a/src/test/org/apache/hcatalog/MiniCluster.java b/src/test/org/apache/hcatalog/MiniCluster.java
index fc8aa5c..d7b54e6 100644
--- a/src/test/org/apache/hcatalog/MiniCluster.java
+++ b/src/test/org/apache/hcatalog/MiniCluster.java
@@ -42,152 +42,159 @@
  * environment for Pig to run on top of the mini cluster.
  */
 public class MiniCluster {
-  private MiniDFSCluster m_dfs = null;
-  private MiniMRCluster m_mr = null;
-  private FileSystem m_fileSys = null;
-  private JobConf m_conf = null;
+    private MiniDFSCluster m_dfs = null;
+    private MiniMRCluster m_mr = null;
+    private FileSystem m_fileSys = null;
+    private JobConf m_conf = null;
 
-  private final static MiniCluster INSTANCE = new MiniCluster();
-  private static boolean isSetup = true;
+    private final static MiniCluster INSTANCE = new MiniCluster();
+    private static boolean isSetup = true;
 
-  private MiniCluster() {
-    setupMiniDfsAndMrClusters();
-  }
-
-  private void setupMiniDfsAndMrClusters() {
-    try {
-      final int dataNodes = 1;     // There will be 4 data nodes
-      final int taskTrackers = 1;  // There will be 4 task tracker nodes
-      Configuration config = new Configuration();
-
-      // Builds and starts the mini dfs and mapreduce clusters
-      System.setProperty("hadoop.log.dir", ".");
-      m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
-
-      m_fileSys = m_dfs.getFileSystem();
-      m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);
-
-      // Create the configuration hadoop-site.xml file
-      File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
-      conf_dir.mkdirs();
-      File conf_file = new File(conf_dir, "hadoop-site.xml");
-
-      // Write the necessary config info to hadoop-site.xml
-      m_conf = m_mr.createJobConf();
-      m_conf.setInt("mapred.submit.replication", 1);
-      m_conf.set("dfs.datanode.address", "0.0.0.0:0");
-      m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
-      m_conf.writeXml(new FileOutputStream(conf_file));
-
-      // Set the system properties needed by Pig
-      System.setProperty("cluster", m_conf.get("mapred.job.tracker"));
-      System.setProperty("namenode", m_conf.get("fs.default.name"));
-      System.setProperty("junit.hadoop.conf", conf_dir.getPath());
-    } catch (IOException e) {
-      throw new RuntimeException(e);
+    private MiniCluster() {
+        setupMiniDfsAndMrClusters();
     }
-  }
 
-  /**
-   * Returns the single instance of class MiniClusterBuilder that
-   * represents the resouces for a mini dfs cluster and a mini
-   * mapreduce cluster.
-   */
-  public static MiniCluster buildCluster() {
-    if(! isSetup){
-      INSTANCE.setupMiniDfsAndMrClusters();
-      isSetup = true;
+    private void setupMiniDfsAndMrClusters() {
+        try {
+            final int dataNodes = 1;     // There will be 1 data node
+            final int taskTrackers = 1;  // There will be 1 task tracker node
+            Configuration config = new Configuration();
+
+            // Builds and starts the mini dfs and mapreduce clusters
+            System.setProperty("hadoop.log.dir", ".");
+            m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
+
+            m_fileSys = m_dfs.getFileSystem();
+            m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);
+
+            // Create the configuration hadoop-site.xml file
+            File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
+            conf_dir.mkdirs();
+            File conf_file = new File(conf_dir, "hadoop-site.xml");
+
+            // Write the necessary config info to hadoop-site.xml
+            m_conf = m_mr.createJobConf();
+            m_conf.setInt("mapred.submit.replication", 1);
+            m_conf.set("dfs.datanode.address", "0.0.0.0:0");
+            m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
+            m_conf.writeXml(new FileOutputStream(conf_file));
+
+            // Set the system properties needed by Pig
+            System.setProperty("cluster", m_conf.get("mapred.job.tracker"));
+            System.setProperty("namenode", m_conf.get("fs.default.name"));
+            System.setProperty("junit.hadoop.conf", conf_dir.getPath());
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
     }
-    return INSTANCE;
-  }
 
-  public void shutDown(){
-    INSTANCE.shutdownMiniDfsAndMrClusters();
-  }
-
-  @Override
-  protected void finalize() {
-    shutdownMiniDfsAndMrClusters();
-  }
-
-  private void shutdownMiniDfsAndMrClusters() {
-    isSetup = false;
-    try {
-      if (m_fileSys != null) { m_fileSys.close(); }
-    } catch (IOException e) {
-      e.printStackTrace();
+    /**
+     * Returns the single instance of class MiniCluster that
+     * represents the resources for a mini dfs cluster and a mini
+     * mapreduce cluster.
+     */
+    public static MiniCluster buildCluster() {
+        if (!isSetup) {
+            INSTANCE.setupMiniDfsAndMrClusters();
+            isSetup = true;
+        }
+        return INSTANCE;
     }
-    if (m_dfs != null) { m_dfs.shutdown(); }
-    if (m_mr != null) { m_mr.shutdown(); }
-    m_fileSys = null;
-    m_dfs = null;
-    m_mr = null;
-  }
 
-  public Properties getProperties() {
-    errorIfNotSetup();
-    Properties properties = new Properties();
-    assert m_conf != null;
-    Iterator<Map.Entry<String, String>> iter = m_conf.iterator();
-    while (iter.hasNext()) {
-        Map.Entry<String, String> entry = iter.next();
-        properties.put(entry.getKey(), entry.getValue());
+    public void shutDown() {
+        INSTANCE.shutdownMiniDfsAndMrClusters();
     }
-    return properties;
-  }
 
-  public void setProperty(String name, String value) {
-    errorIfNotSetup();
-    m_conf.set(name, value);
-  }
+    @Override
+    protected void finalize() {
+        shutdownMiniDfsAndMrClusters();
+    }
 
-  public FileSystem getFileSystem() {
-    errorIfNotSetup();
-    return m_fileSys;
-  }
+    private void shutdownMiniDfsAndMrClusters() {
+        isSetup = false;
+        try {
+            if (m_fileSys != null) {
+                m_fileSys.close();
+            }
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+        if (m_dfs != null) {
+            m_dfs.shutdown();
+        }
+        if (m_mr != null) {
+            m_mr.shutdown();
+        }
+        m_fileSys = null;
+        m_dfs = null;
+        m_mr = null;
+    }
 
-  /**
-   * Throw RunTimeException if isSetup is false
-   */
-   private void errorIfNotSetup(){
-     if(isSetup) {
-       return;
-     }
-     String msg = "function called on MiniCluster that has been shutdown";
-     throw new RuntimeException(msg);
-   }
+    public Properties getProperties() {
+        errorIfNotSetup();
+        Properties properties = new Properties();
+        assert m_conf != null;
+        Iterator<Map.Entry<String, String>> iter = m_conf.iterator();
+        while (iter.hasNext()) {
+            Map.Entry<String, String> entry = iter.next();
+            properties.put(entry.getKey(), entry.getValue());
+        }
+        return properties;
+    }
 
-   static public void createInputFile(MiniCluster miniCluster, String fileName,
-       String[] inputData)
-   throws IOException {
-     FileSystem fs = miniCluster.getFileSystem();
-     createInputFile(fs, fileName, inputData);
-   }
+    public void setProperty(String name, String value) {
+        errorIfNotSetup();
+        m_conf.set(name, value);
+    }
 
-   static public void createInputFile(FileSystem fs, String fileName,
-       String[] inputData) throws IOException {
-     Path path = new Path(fileName);
-     if(fs.exists(path)) {
-       throw new IOException("File " + fileName + " already exists on the minicluster");
-     }
-     FSDataOutputStream stream = fs.create(path);
-     PrintWriter pw = new PrintWriter(new OutputStreamWriter(stream, "UTF-8"));
-     for (int i=0; i<inputData.length; i++){
-       pw.println(inputData[i]);
-     }
-     pw.close();
+    public FileSystem getFileSystem() {
+        errorIfNotSetup();
+        return m_fileSys;
+    }
 
-   }
-   /**
-    * Helper to remove a dfs file from the minicluster DFS
-    *
-    * @param miniCluster reference to the Minicluster where the file should be deleted
-    * @param fileName pathname of the file to be deleted
-    * @throws IOException
-    */
-   static public void deleteFile(MiniCluster miniCluster, String fileName)
-   throws IOException {
-     FileSystem fs = miniCluster.getFileSystem();
-     fs.delete(new Path(fileName), true);
-   }
+    /**
+     * Throws a RuntimeException if isSetup is false
+     */
+    private void errorIfNotSetup() {
+        if (isSetup) {
+            return;
+        }
+        String msg = "function called on MiniCluster that has been shutdown";
+        throw new RuntimeException(msg);
+    }
+
+    static public void createInputFile(MiniCluster miniCluster, String fileName,
+                                       String[] inputData)
+        throws IOException {
+        FileSystem fs = miniCluster.getFileSystem();
+        createInputFile(fs, fileName, inputData);
+    }
+
+    static public void createInputFile(FileSystem fs, String fileName,
+                                       String[] inputData) throws IOException {
+        Path path = new Path(fileName);
+        if (fs.exists(path)) {
+            throw new IOException("File " + fileName + " already exists on the minicluster");
+        }
+        FSDataOutputStream stream = fs.create(path);
+        PrintWriter pw = new PrintWriter(new OutputStreamWriter(stream, "UTF-8"));
+        for (int i = 0; i < inputData.length; i++) {
+            pw.println(inputData[i]);
+        }
+        pw.close();
+
+    }
+
+    /**
+     * Helper to remove a file from the minicluster DFS.
+     *
+     * @param miniCluster reference to the MiniCluster from which the file should be deleted
+     * @param fileName pathname of the file to be deleted
+     * @throws IOException
+     */
+    static public void deleteFile(MiniCluster miniCluster, String fileName)
+        throws IOException {
+        FileSystem fs = miniCluster.getFileSystem();
+        fs.delete(new Path(fileName), true);
+    }
 }
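
For reference, a minimal usage sketch of the MiniCluster helpers reformatted above. The sketch
class itself is hypothetical and the org.apache.hcatalog package for MiniCluster is assumed from
the surrounding test tree; the method names and signatures are the ones visible in the hunk.

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hcatalog.MiniCluster;   // assumed package

    public class MiniClusterUsageSketch {
        public static void main(String[] args) throws IOException {
            // buildCluster() lazily starts the mini DFS and MR clusters and returns the singleton.
            MiniCluster cluster = MiniCluster.buildCluster();
            try {
                // Write a small input file into the mini DFS.
                MiniCluster.createInputFile(cluster, "/tmp/sketch/input.txt",
                        new String[]{"row1", "row2"});

                // Accessors such as getFileSystem() throw a RuntimeException once the cluster
                // has been shut down.
                FileSystem fs = cluster.getFileSystem();
                System.out.println("exists: " + fs.exists(new Path("/tmp/sketch/input.txt")));

                // Remove the file again.
                MiniCluster.deleteFile(cluster, "/tmp/sketch/input.txt");
            } finally {
                cluster.shutDown();
            }
        }
    }
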
diff --git a/src/test/org/apache/hcatalog/NoExitSecurityManager.java b/src/test/org/apache/hcatalog/NoExitSecurityManager.java
index 8e775a7..b475602 100644
--- a/src/test/org/apache/hcatalog/NoExitSecurityManager.java
+++ b/src/test/org/apache/hcatalog/NoExitSecurityManager.java
@@ -22,20 +22,20 @@
 
 public class NoExitSecurityManager extends SecurityManager {
 
-  @Override
-  public void checkPermission(Permission perm) {
-    // allow anything.
-  }
+    @Override
+    public void checkPermission(Permission perm) {
+        // allow anything.
+    }
 
-  @Override
-  public void checkPermission(Permission perm, Object context) {
-    // allow anything.
-  }
+    @Override
+    public void checkPermission(Permission perm, Object context) {
+        // allow anything.
+    }
 
-  @Override
-  public void checkExit(int status) {
+    @Override
+    public void checkExit(int status) {
 
-    super.checkExit(status);
-    throw new ExitException(status);
-  }
+        super.checkExit(status);
+        throw new ExitException(status);
+    }
 }
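
A minimal sketch of how this security manager is used (the pattern TestPermsGrp follows below):
install it so that System.exit() surfaces as a catchable ExitException, then restore the original
manager afterwards. The org.apache.hcatalog locations of both classes are assumed from the file
paths in this patch.

    import org.apache.hcatalog.ExitException;          // assumed location
    import org.apache.hcatalog.NoExitSecurityManager;  // assumed location

    public class NoExitUsageSketch {
        public static void main(String[] args) {
            SecurityManager original = System.getSecurityManager();
            System.setSecurityManager(new NoExitSecurityManager());
            try {
                System.exit(7);                          // would normally terminate the JVM
            } catch (ExitException e) {
                // checkExit(int) above rethrows the requested status as an ExitException.
                System.out.println("intercepted exit status: " + e.getStatus());
            } finally {
                System.setSecurityManager(original);     // restore, as TestPermsGrp.tearDown() does
            }
        }
    }
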
diff --git a/src/test/org/apache/hcatalog/cli/DummyStorageHandler.java b/src/test/org/apache/hcatalog/cli/DummyStorageHandler.java
index f1d8b13..ecc105e 100644
--- a/src/test/org/apache/hcatalog/cli/DummyStorageHandler.java
+++ b/src/test/org/apache/hcatalog/cli/DummyStorageHandler.java
@@ -91,7 +91,7 @@
 
     @Override
     public HiveAuthorizationProvider getAuthorizationProvider()
-            throws HiveException {
+        throws HiveException {
         return new DummyAuthProvider();
     }
 
@@ -140,8 +140,8 @@
          */
         @Override
         public void authorize(Privilege[] readRequiredPriv,
-                Privilege[] writeRequiredPriv) throws HiveException,
-                AuthorizationException {
+                              Privilege[] writeRequiredPriv) throws HiveException,
+            AuthorizationException {
         }
 
         /* @param db
@@ -153,8 +153,8 @@
          */
         @Override
         public void authorize(Database db, Privilege[] readRequiredPriv,
-                Privilege[] writeRequiredPriv) throws HiveException,
-                AuthorizationException {
+                              Privilege[] writeRequiredPriv) throws HiveException,
+            AuthorizationException {
         }
 
         /* @param table
@@ -166,8 +166,8 @@
          */
         @Override
         public void authorize(org.apache.hadoop.hive.ql.metadata.Table table, Privilege[] readRequiredPriv,
-                Privilege[] writeRequiredPriv) throws HiveException,
-                AuthorizationException {
+                              Privilege[] writeRequiredPriv) throws HiveException,
+            AuthorizationException {
         }
 
         /* @param part
@@ -179,8 +179,8 @@
          */
         @Override
         public void authorize(Partition part, Privilege[] readRequiredPriv,
-                Privilege[] writeRequiredPriv) throws HiveException,
-                AuthorizationException {
+                              Privilege[] writeRequiredPriv) throws HiveException,
+            AuthorizationException {
         }
 
         /* @param table
@@ -194,8 +194,8 @@
          */
         @Override
         public void authorize(org.apache.hadoop.hive.ql.metadata.Table table, Partition part, List<String> columns,
-                Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-                throws HiveException, AuthorizationException {
+                              Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+            throws HiveException, AuthorizationException {
         }
 
     }
@@ -205,7 +205,7 @@
      * mapred.InputFormat required by HiveStorageHandler.
      */
     class DummyInputFormat implements
-            InputFormat<WritableComparable, HCatRecord> {
+        InputFormat<WritableComparable, HCatRecord> {
 
         /*
          * @see
@@ -215,8 +215,8 @@
          */
         @Override
         public RecordReader<WritableComparable, HCatRecord> getRecordReader(
-                InputSplit split, JobConf jobconf, Reporter reporter)
-                throws IOException {
+            InputSplit split, JobConf jobconf, Reporter reporter)
+            throws IOException {
             throw new IOException("This operation is not supported.");
         }
 
@@ -227,7 +227,7 @@
          */
         @Override
         public InputSplit[] getSplits(JobConf jobconf, int number)
-                throws IOException {
+            throws IOException {
             throw new IOException("This operation is not supported.");
         }
     }
@@ -237,8 +237,8 @@
      * mapred.OutputFormat and HiveOutputFormat required by HiveStorageHandler.
      */
     class DummyOutputFormat implements
-            OutputFormat<WritableComparable<?>, HCatRecord>,
-            HiveOutputFormat<WritableComparable<?>, HCatRecord> {
+        OutputFormat<WritableComparable<?>, HCatRecord>,
+        HiveOutputFormat<WritableComparable<?>, HCatRecord> {
 
         /*
          * @see
@@ -247,7 +247,7 @@
          */
         @Override
         public void checkOutputSpecs(FileSystem fs, JobConf jobconf)
-                throws IOException {
+            throws IOException {
             throw new IOException("This operation is not supported.");
 
         }
@@ -260,8 +260,8 @@
          */
         @Override
         public RecordWriter<WritableComparable<?>, HCatRecord> getRecordWriter(
-                FileSystem fs, JobConf jobconf, String str,
-                Progressable progress) throws IOException {
+            FileSystem fs, JobConf jobconf, String str,
+            Progressable progress) throws IOException {
             throw new IOException("This operation is not supported.");
         }
 
@@ -274,10 +274,10 @@
          */
         @Override
         public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(
-                JobConf jc, Path finalOutPath,
-                Class<? extends Writable> valueClass, boolean isCompressed,
-                Properties tableProperties, Progressable progress)
-                throws IOException {
+            JobConf jc, Path finalOutPath,
+            Class<? extends Writable> valueClass, boolean isCompressed,
+            Properties tableProperties, Progressable progress)
+            throws IOException {
             throw new IOException("This operation is not supported.");
         }
 
diff --git a/src/test/org/apache/hcatalog/cli/TestPermsGrp.java b/src/test/org/apache/hcatalog/cli/TestPermsGrp.java
index c5abe4d..96c0013 100644
--- a/src/test/org/apache/hcatalog/cli/TestPermsGrp.java
+++ b/src/test/org/apache/hcatalog/cli/TestPermsGrp.java
@@ -53,179 +53,177 @@
 
 public class TestPermsGrp extends TestCase {
 
-  private boolean isServerRunning = false;
-  private static final int msPort = 20101;
-  private HiveConf hcatConf;
-  private Warehouse clientWH;
-  private HiveMetaStoreClient msc;
-  private static final Logger LOG = LoggerFactory.getLogger(TestPermsGrp.class);
+    private boolean isServerRunning = false;
+    private static final int msPort = 20101;
+    private HiveConf hcatConf;
+    private Warehouse clientWH;
+    private HiveMetaStoreClient msc;
+    private static final Logger LOG = LoggerFactory.getLogger(TestPermsGrp.class);
 
-  @Override
-  protected void tearDown() throws Exception {
-    System.setSecurityManager(securityManager);
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-
-    if(isServerRunning) {
-      return;
+    @Override
+    protected void tearDown() throws Exception {
+        System.setSecurityManager(securityManager);
     }
 
-    MetaStoreUtils.startMetaStore(msPort, ShimLoader.getHadoopThriftAuthBridge());
+    @Override
+    protected void setUp() throws Exception {
 
-    isServerRunning = true;
+        if (isServerRunning) {
+            return;
+        }
 
-    securityManager = System.getSecurityManager();
-    System.setSecurityManager(new NoExitSecurityManager());
+        MetaStoreUtils.startMetaStore(msPort, ShimLoader.getHadoopThriftAuthBridge());
 
-    hcatConf = new HiveConf(this.getClass());
-    hcatConf.set("hive.metastore.local", "false");
-    hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://127.0.0.1:" + msPort);
-    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTRETRIES, 3);
+        isServerRunning = true;
 
-    hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
-    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    clientWH = new Warehouse(hcatConf);
-    msc = new HiveMetaStoreClient(hcatConf,null);
-    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
-    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
-  }
+        securityManager = System.getSecurityManager();
+        System.setSecurityManager(new NoExitSecurityManager());
 
+        hcatConf = new HiveConf(this.getClass());
+        hcatConf.set("hive.metastore.local", "false");
+        hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://127.0.0.1:" + msPort);
+        hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTRETRIES, 3);
 
-  public void testCustomPerms() throws Exception {
-
-    String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
-    String tblName = "simptbl";
-    String typeName = "Person";
-
-    try {
-
-      // Lets first test for default permissions, this is the case when user specified nothing.
-      Table tbl = getTable(dbName,tblName,typeName);
-      msc.createTable(tbl);
-      Database db = Hive.get(hcatConf).getDatabase(dbName);
-      Path dfsPath = clientWH.getTablePath(db, tblName);
-      cleanupTbl(dbName, tblName, typeName);
-
-      // Next user did specify perms.
-      try{
-        HCatCli.main(new String[]{"-e","create table simptbl (name string) stored as RCFILE", "-p","rwx-wx---"});
-      }
-      catch(Exception e){
-        assertTrue(e instanceof ExitException);
-        assertEquals(((ExitException)e).getStatus(), 0);
-      }
-      dfsPath = clientWH.getTablePath(db, tblName);
-      assertTrue(dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath).getPermission().equals(FsPermission.valueOf("drwx-wx---")));
-
-      cleanupTbl(dbName, tblName, typeName);
-
-      // User specified perms in invalid format.
-      hcatConf.set(HCatConstants.HCAT_PERMS, "rwx");
-      // make sure create table fails.
-      try{
-        HCatCli.main(new String[]{"-e","create table simptbl (name string) stored as RCFILE", "-p","rwx"});
-        assert false;
-      }catch(Exception me){
-        assertTrue(me instanceof ExitException);
-      }
-      // No physical dir gets created.
-      dfsPath = clientWH.getTablePath(db,tblName);
-      try{
-        dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath);
-        assert false;
-      } catch(Exception fnfe){
-        assertTrue(fnfe instanceof FileNotFoundException);
-      }
-
-      // And no metadata gets created.
-      try{
-        msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
-        assert false;
-      }catch (Exception e){
-        assertTrue(e instanceof NoSuchObjectException);
-        assertEquals("default.simptbl table not found", e.getMessage());
-      }
-
-      // test for invalid group name
-      hcatConf.set(HCatConstants.HCAT_PERMS, "drw-rw-rw-");
-      hcatConf.set(HCatConstants.HCAT_GROUP, "THIS_CANNOT_BE_A_VALID_GRP_NAME_EVER");
-
-      try{
-        // create table must fail.
-        HCatCli.main(new String[]{"-e","create table simptbl (name string) stored as RCFILE", "-p","rw-rw-rw-","-g","THIS_CANNOT_BE_A_VALID_GRP_NAME_EVER"});
-        assert false;
-      }catch (Exception me){
-        assertTrue(me instanceof SecurityException);
-      }
-
-      try{
-        // no metadata should get created.
-        msc.getTable(dbName, tblName);
-        assert false;
-      }catch (Exception e){
-        assertTrue(e instanceof NoSuchObjectException);
-        assertEquals("default.simptbl table not found", e.getMessage());
-      }
-      try{
-        // neither dir should get created.
-        dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath);
-        assert false;
-      } catch(Exception e){
-        assertTrue(e instanceof FileNotFoundException);
-      }
-
-    } catch (Exception e) {
-        LOG.error("testCustomPerms failed.", e);
-      throw e;
+        hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
+        hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+        hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+        hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+        clientWH = new Warehouse(hcatConf);
+        msc = new HiveMetaStoreClient(hcatConf, null);
+        System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
+        System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
     }
-  }
 
-  private void silentDropDatabase(String dbName) throws MetaException, TException {
-    try {
-      for (String tableName : msc.getTables(dbName, "*")) {
-        msc.dropTable(dbName, tableName);
-      }
 
-    } catch (NoSuchObjectException e) {
+    public void testCustomPerms() throws Exception {
+
+        String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+        String tblName = "simptbl";
+        String typeName = "Person";
+
+        try {
+
+            // Let's first test the default permissions, i.e. the case where the user specified nothing.
+            Table tbl = getTable(dbName, tblName, typeName);
+            msc.createTable(tbl);
+            Database db = Hive.get(hcatConf).getDatabase(dbName);
+            Path dfsPath = clientWH.getTablePath(db, tblName);
+            cleanupTbl(dbName, tblName, typeName);
+
+            // Next, the user did specify perms.
+            try {
+                HCatCli.main(new String[]{"-e", "create table simptbl (name string) stored as RCFILE", "-p", "rwx-wx---"});
+            } catch (Exception e) {
+                assertTrue(e instanceof ExitException);
+                assertEquals(((ExitException) e).getStatus(), 0);
+            }
+            dfsPath = clientWH.getTablePath(db, tblName);
+            assertTrue(dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath).getPermission().equals(FsPermission.valueOf("drwx-wx---")));
+
+            cleanupTbl(dbName, tblName, typeName);
+
+            // User specified perms in invalid format.
+            hcatConf.set(HCatConstants.HCAT_PERMS, "rwx");
+            // make sure create table fails.
+            try {
+                HCatCli.main(new String[]{"-e", "create table simptbl (name string) stored as RCFILE", "-p", "rwx"});
+                assert false;
+            } catch (Exception me) {
+                assertTrue(me instanceof ExitException);
+            }
+            // No physical dir gets created.
+            dfsPath = clientWH.getTablePath(db, tblName);
+            try {
+                dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath);
+                assert false;
+            } catch (Exception fnfe) {
+                assertTrue(fnfe instanceof FileNotFoundException);
+            }
+
+            // And no metadata gets created.
+            try {
+                msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
+                assert false;
+            } catch (Exception e) {
+                assertTrue(e instanceof NoSuchObjectException);
+                assertEquals("default.simptbl table not found", e.getMessage());
+            }
+
+            // test for invalid group name
+            hcatConf.set(HCatConstants.HCAT_PERMS, "drw-rw-rw-");
+            hcatConf.set(HCatConstants.HCAT_GROUP, "THIS_CANNOT_BE_A_VALID_GRP_NAME_EVER");
+
+            try {
+                // create table must fail.
+                HCatCli.main(new String[]{"-e", "create table simptbl (name string) stored as RCFILE", "-p", "rw-rw-rw-", "-g", "THIS_CANNOT_BE_A_VALID_GRP_NAME_EVER"});
+                assert false;
+            } catch (Exception me) {
+                assertTrue(me instanceof SecurityException);
+            }
+
+            try {
+                // no metadata should get created.
+                msc.getTable(dbName, tblName);
+                assert false;
+            } catch (Exception e) {
+                assertTrue(e instanceof NoSuchObjectException);
+                assertEquals("default.simptbl table not found", e.getMessage());
+            }
+            try {
+                // neither dir should get created.
+                dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath);
+                assert false;
+            } catch (Exception e) {
+                assertTrue(e instanceof FileNotFoundException);
+            }
+
+        } catch (Exception e) {
+            LOG.error("testCustomPerms failed.", e);
+            throw e;
+        }
     }
-  }
 
-  private void cleanupTbl(String dbName, String tblName, String typeName) throws NoSuchObjectException, MetaException, TException, InvalidOperationException{
+    private void silentDropDatabase(String dbName) throws MetaException, TException {
+        try {
+            for (String tableName : msc.getTables(dbName, "*")) {
+                msc.dropTable(dbName, tableName);
+            }
 
-    msc.dropTable(dbName, tblName);
-    msc.dropType(typeName);
-  }
+        } catch (NoSuchObjectException e) {
+        }
+    }
 
-  private Table getTable(String dbName, String tblName, String typeName) throws NoSuchObjectException, MetaException, TException, AlreadyExistsException, InvalidObjectException{
+    private void cleanupTbl(String dbName, String tblName, String typeName) throws NoSuchObjectException, MetaException, TException, InvalidOperationException {
 
-    msc.dropTable(dbName, tblName);
-    silentDropDatabase(dbName);
+        msc.dropTable(dbName, tblName);
+        msc.dropType(typeName);
+    }
+
+    private Table getTable(String dbName, String tblName, String typeName) throws NoSuchObjectException, MetaException, TException, AlreadyExistsException, InvalidObjectException {
+
+        msc.dropTable(dbName, tblName);
+        silentDropDatabase(dbName);
 
 
-    msc.dropType(typeName);
-    Type typ1 = new Type();
-    typ1.setName(typeName);
-    typ1.setFields(new ArrayList<FieldSchema>(1));
-    typ1.getFields().add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
-    msc.createType(typ1);
+        msc.dropType(typeName);
+        Type typ1 = new Type();
+        typ1.setName(typeName);
+        typ1.setFields(new ArrayList<FieldSchema>(1));
+        typ1.getFields().add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+        msc.createType(typ1);
 
-    Table tbl = new Table();
-    tbl.setDbName(dbName);
-    tbl.setTableName(tblName);
-    StorageDescriptor sd = new StorageDescriptor();
-    tbl.setSd(sd);
-    sd.setCols(typ1.getFields());
+        Table tbl = new Table();
+        tbl.setDbName(dbName);
+        tbl.setTableName(tblName);
+        StorageDescriptor sd = new StorageDescriptor();
+        tbl.setSd(sd);
+        sd.setCols(typ1.getFields());
 
-    sd.setSerdeInfo(new SerDeInfo());
-    return tbl;
-  }
+        sd.setSerdeInfo(new SerDeInfo());
+        return tbl;
+    }
 
 
-
-  private SecurityManager securityManager;
+    private SecurityManager securityManager;
 
 }
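
The permission flow that testCustomPerms exercises, condensed into a standalone helper as a
sketch. The org.apache.hcatalog.cli.HCatCli and org.apache.hcatalog.ExitException import locations
are assumptions, and a running metastore plus the NoExitSecurityManager are assumed to be set up
exactly as in setUp() above.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.MetaStoreUtils;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hcatalog.ExitException;        // assumed location
    import org.apache.hcatalog.cli.HCatCli;          // assumed location

    public class PermsCheckSketch {
        // Create a table through HCatCli with explicit perms, then verify the table directory
        // picked them up -- the same assertions testCustomPerms makes above.
        static void checkTablePerms(HiveConf hcatConf, Warehouse clientWH) throws Exception {
            try {
                HCatCli.main(new String[]{"-e", "create table simptbl (name string) stored as RCFILE",
                        "-p", "rwx-wx---"});
            } catch (ExitException e) {
                // HCatCli finishes via System.exit; NoExitSecurityManager converts that into an
                // ExitException carrying the exit status.
                if (e.getStatus() != 0) {
                    throw e;
                }
            }
            Path dfsPath = clientWH.getTablePath(
                    Hive.get(hcatConf).getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME), "simptbl");
            FsPermission actual = dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath).getPermission();
            if (!actual.equals(FsPermission.valueOf("drwx-wx---"))) {
                throw new AssertionError("unexpected table dir permission: " + actual);
            }
        }
    }
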
diff --git a/src/test/org/apache/hcatalog/cli/TestSemanticAnalysis.java b/src/test/org/apache/hcatalog/cli/TestSemanticAnalysis.java
index a8924ab..20a36c5 100644
--- a/src/test/org/apache/hcatalog/cli/TestSemanticAnalysis.java
+++ b/src/test/org/apache/hcatalog/cli/TestSemanticAnalysis.java
@@ -52,340 +52,339 @@
 
 public class TestSemanticAnalysis extends HCatBaseTest {
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestSemanticAnalysis.class);
-  private static final String TBL_NAME = "junit_sem_analysis";
+    private static final Logger LOG = LoggerFactory.getLogger(TestSemanticAnalysis.class);
+    private static final String TBL_NAME = "junit_sem_analysis";
 
-  private Driver hcatDriver = null;
-  private String query;
+    private Driver hcatDriver = null;
+    private String query;
 
-  @Before
-  public void setUpHCatDriver() throws IOException {
-    if (hcatDriver == null) {
-      HiveConf hcatConf = new HiveConf(hiveConf);
-      hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-          HCatSemanticAnalyzer.class.getName());
-      hcatDriver = new Driver(hcatConf);
-      SessionState.start(new CliSessionState(hcatConf));
-    }
-  }
-
-  @Test
-  public void testDescDB() throws CommandNeedRetryException, IOException {
-    hcatDriver.run("drop database mydb cascade");
-    assertEquals(0, hcatDriver.run("create database mydb").getResponseCode());
-    CommandProcessorResponse resp = hcatDriver.run("describe database mydb");
-    assertEquals(0, resp.getResponseCode());
-    ArrayList<String> result = new ArrayList<String>();
-    hcatDriver.getResults(result);
-    assertTrue(result.get(0).contains("mydb.db"));
-    hcatDriver.run("drop database mydb cascade");
-  }
-
-  @Test
-  public void testCreateTblWithLowerCasePartNames() throws CommandNeedRetryException, MetaException, TException, NoSuchObjectException{
-    driver.run("drop table junit_sem_analysis");
-    CommandProcessorResponse resp = driver.run("create table junit_sem_analysis (a int) partitioned by (B string) stored as TEXTFILE");
-    assertEquals(resp.getResponseCode(), 0);
-    assertEquals(null, resp.getErrorMessage());
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
-    assertEquals("Partition key name case problem", "b" , tbl.getPartitionKeys().get(0).getName());
-    driver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testAlterTblFFpart() throws MetaException, TException, NoSuchObjectException, CommandNeedRetryException {
-
-    driver.run("drop table junit_sem_analysis");
-    driver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as TEXTFILE");
-    driver.run("alter table junit_sem_analysis add partition (b='2010-10-10')");
-    hcatDriver.run("alter table junit_sem_analysis partition (b='2010-10-10') set fileformat RCFILE");
-
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
-    assertEquals(TextInputFormat.class.getName(), tbl.getSd().getInputFormat());
-    assertEquals(HiveIgnoreKeyTextOutputFormat.class.getName(),tbl.getSd().getOutputFormat());
-
-    List<String> partVals = new ArrayList<String>(1);
-    partVals.add("2010-10-10");
-    Partition part = client.getPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME, partVals);
-
-    assertEquals(RCFileInputFormat.class.getName(),part.getSd().getInputFormat());
-    assertEquals(RCFileOutputFormat.class.getName(),part.getSd().getOutputFormat());
-
-    hcatDriver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testUsNonExistentDB() throws CommandNeedRetryException {
-      CommandProcessorResponse resp = hcatDriver.run("use no_such_db");
-      assertEquals(1, resp.getResponseCode());
-  }
-
-  @Test
-  public void testDatabaseOperations() throws MetaException, CommandNeedRetryException {
-
-    List<String> dbs = client.getAllDatabases();
-    String testDb1 = "testdatabaseoperatons1";
-    String testDb2 = "testdatabaseoperatons2";
-
-    if (dbs.contains(testDb1.toLowerCase())){
-      assertEquals(0,hcatDriver.run("drop database "+testDb1).getResponseCode());
+    @Before
+    public void setUpHCatDriver() throws IOException {
+        if (hcatDriver == null) {
+            HiveConf hcatConf = new HiveConf(hiveConf);
+            hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
+                    HCatSemanticAnalyzer.class.getName());
+            hcatDriver = new Driver(hcatConf);
+            SessionState.start(new CliSessionState(hcatConf));
+        }
     }
 
-    if (dbs.contains(testDb2.toLowerCase())){
-      assertEquals(0,hcatDriver.run("drop database "+testDb2).getResponseCode());
+    @Test
+    public void testDescDB() throws CommandNeedRetryException, IOException {
+        hcatDriver.run("drop database mydb cascade");
+        assertEquals(0, hcatDriver.run("create database mydb").getResponseCode());
+        CommandProcessorResponse resp = hcatDriver.run("describe database mydb");
+        assertEquals(0, resp.getResponseCode());
+        ArrayList<String> result = new ArrayList<String>();
+        hcatDriver.getResults(result);
+        assertTrue(result.get(0).contains("mydb.db"));
+        hcatDriver.run("drop database mydb cascade");
     }
 
-    assertEquals(0,hcatDriver.run("create database "+testDb1).getResponseCode());
-    assertTrue(client.getAllDatabases().contains(testDb1));
-    assertEquals(0, hcatDriver.run("create database if not exists " + testDb1).getResponseCode());
-    assertTrue(client.getAllDatabases().contains(testDb1));
-    assertEquals(0,hcatDriver.run("create database if not exists "+testDb2).getResponseCode());
-    assertTrue(client.getAllDatabases().contains(testDb2));
-
-    assertEquals(0,hcatDriver.run("drop database "+testDb1).getResponseCode());
-    assertEquals(0,hcatDriver.run("drop database "+testDb2).getResponseCode());
-    assertFalse(client.getAllDatabases().contains(testDb1));
-    assertFalse(client.getAllDatabases().contains(testDb2));
-  }
-
-  @Test
-  public void testCreateTableIfNotExists() throws MetaException, TException, NoSuchObjectException, CommandNeedRetryException{
-
-    hcatDriver.run("drop table "+ TBL_NAME);
-    hcatDriver.run("create table junit_sem_analysis (a int) stored as RCFILE");
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
-    List<FieldSchema> cols = tbl.getSd().getCols();
-    assertEquals(1, cols.size());
-    assertTrue(cols.get(0).equals(new FieldSchema("a", "int", null)));
-    assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
-    assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
-
-    CommandProcessorResponse resp = hcatDriver.run("create table if not exists junit_sem_analysis (a int) stored as RCFILE");
-    assertEquals(0, resp.getResponseCode());
-    assertNull(resp.getErrorMessage());
-    tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
-    cols = tbl.getSd().getCols();
-    assertEquals(1, cols.size());
-    assertTrue(cols.get(0).equals(new FieldSchema("a", "int",null)));
-    assertEquals(RCFileInputFormat.class.getName(),tbl.getSd().getInputFormat());
-    assertEquals(RCFileOutputFormat.class.getName(),tbl.getSd().getOutputFormat());
-
-    hcatDriver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testAlterTblTouch() throws CommandNeedRetryException{
-
-    hcatDriver.run("drop table junit_sem_analysis");
-    hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
-    CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis touch");
-    assertEquals(0, response.getResponseCode());
-
-    hcatDriver.run("alter table junit_sem_analysis touch partition (b='12')");
-    assertEquals(0, response.getResponseCode());
-
-    hcatDriver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testChangeColumns() throws CommandNeedRetryException{
-    hcatDriver.run("drop table junit_sem_analysis");
-    hcatDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as RCFILE");
-    CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis change a a1 int");
-    assertEquals(0, response.getResponseCode());
-
-    response = hcatDriver.run("alter table junit_sem_analysis change a1 a string");
-    assertEquals(0, response.getResponseCode());
-
-    response = hcatDriver.run("alter table junit_sem_analysis change a a int after c");
-    assertEquals(0, response.getResponseCode());
-    hcatDriver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testAddReplaceCols() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException{
-
-    hcatDriver.run("drop table junit_sem_analysis");
-    hcatDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as RCFILE");
-    CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis replace columns (a1 tinyint)");
-    assertEquals(0, response.getResponseCode());
-
-    response = hcatDriver.run("alter table junit_sem_analysis add columns (d tinyint)");
-    assertEquals(0, response.getResponseCode());
-    assertNull(response.getErrorMessage());
-
-    response = hcatDriver.run("describe extended junit_sem_analysis");
-    assertEquals(0, response.getResponseCode());
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
-    List<FieldSchema> cols = tbl.getSd().getCols();
-    assertEquals(2, cols.size());
-    assertTrue(cols.get(0).equals(new FieldSchema("a1", "tinyint", null)));
-    assertTrue(cols.get(1).equals(new FieldSchema("d", "tinyint", null)));
-    hcatDriver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testAlterTblClusteredBy() throws CommandNeedRetryException{
-
-    hcatDriver.run("drop table junit_sem_analysis");
-    hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
-    CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis clustered by (a) into 7 buckets");
-    assertEquals(0, response.getResponseCode());
-    hcatDriver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testAlterTableSetFF() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException{
-
-    hcatDriver.run("drop table junit_sem_analysis");
-    hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
-
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
-    assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
-    assertEquals(RCFileOutputFormat.class.getName(),tbl.getSd().getOutputFormat());
-
-    hcatDriver.run("alter table junit_sem_analysis set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " +
-        "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'");
-    hcatDriver.run("desc extended junit_sem_analysis");
-
-    tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
-    assertEquals(RCFileInputFormat.class.getName(),tbl.getSd().getInputFormat());
-    assertEquals(RCFileOutputFormat.class.getName(),tbl.getSd().getOutputFormat());
-
-    hcatDriver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testAddPartFail() throws CommandNeedRetryException{
-
-    driver.run("drop table junit_sem_analysis");
-    driver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
-    CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis add partition (b='2') location 'README.txt'");
-    assertEquals(0, response.getResponseCode());
-    driver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testAddPartPass() throws IOException, CommandNeedRetryException{
-
-    hcatDriver.run("drop table junit_sem_analysis");
-    hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
-    CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis add partition (b='2') location '" + TEST_DATA_DIR + "'");
-    assertEquals(0, response.getResponseCode());
-    assertNull(response.getErrorMessage());
-    hcatDriver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testCTAS() throws CommandNeedRetryException{
-    hcatDriver.run("drop table junit_sem_analysis");
-    query = "create table junit_sem_analysis (a int) as select * from tbl2";
-    CommandProcessorResponse response = hcatDriver.run(query);
-    assertEquals(40000, response.getResponseCode());
-    assertTrue(response.getErrorMessage().contains("FAILED: SemanticException Operation not supported. Create table as Select is not a valid operation."));
-    hcatDriver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testStoredAs() throws CommandNeedRetryException{
-    hcatDriver.run("drop table junit_sem_analysis");
-    query = "create table junit_sem_analysis (a int)";
-    CommandProcessorResponse response = hcatDriver.run(query);
-    assertEquals(0, response.getResponseCode());
-    hcatDriver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testAddDriverInfo() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException{
-
-    hcatDriver.run("drop table junit_sem_analysis");
-    query =  "create table junit_sem_analysis (a int) partitioned by (b string)  stored as " +
-        "INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " +
-        "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver' ";
-    assertEquals(0,hcatDriver.run(query).getResponseCode());
-
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
-    assertEquals(RCFileInputFormat.class.getName(),tbl.getSd().getInputFormat());
-    assertEquals(RCFileOutputFormat.class.getName(),tbl.getSd().getOutputFormat());
-
-    hcatDriver.run("drop table junit_sem_analysis");
-  }
-
-  @Test
-  public void testInvalidateNonStringPartition() throws IOException, CommandNeedRetryException{
-
-    hcatDriver.run("drop table junit_sem_analysis");
-    query =  "create table junit_sem_analysis (a int) partitioned by (b int)  stored as RCFILE";
-
-    CommandProcessorResponse response = hcatDriver.run(query);
-    assertEquals(40000,response.getResponseCode());
-    assertEquals("FAILED: SemanticException Operation not supported. HCatalog only supports partition columns of type string. For column: b Found type: int",
-        response.getErrorMessage());
-
-  }
-
-  @Test
-  public void testInvalidateSeqFileStoredAs() throws IOException, CommandNeedRetryException{
-
-    hcatDriver.run("drop table junit_sem_analysis");
-    query =  "create table junit_sem_analysis (a int) partitioned by (b string)  stored as SEQUENCEFILE";
-
-    CommandProcessorResponse response = hcatDriver.run(query);
-    assertEquals(0,response.getResponseCode());
-
-  }
-
-  @Test
-  public void testInvalidateTextFileStoredAs() throws IOException, CommandNeedRetryException{
-
-    hcatDriver.run("drop table junit_sem_analysis");
-    query =  "create table junit_sem_analysis (a int) partitioned by (b string)  stored as TEXTFILE";
-
-    CommandProcessorResponse response = hcatDriver.run(query);
-    assertEquals(0,response.getResponseCode());
-
-  }
-
-  @Test
-  public void testInvalidateClusteredBy() throws IOException, CommandNeedRetryException{
-
-    hcatDriver.run("drop table junit_sem_analysis");
-    query =  "create table junit_sem_analysis (a int) partitioned by (b string) clustered by (a) into 10 buckets stored as TEXTFILE";
-
-    CommandProcessorResponse response = hcatDriver.run(query);
-    assertEquals(0,response.getResponseCode());
-  }
-
-  @Test
-  public void testCTLFail() throws IOException, CommandNeedRetryException{
-
-    driver.run("drop table junit_sem_analysis");
-    driver.run("drop table like_table");
-    query =  "create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE";
-
-    driver.run(query);
-    query = "create table like_table like junit_sem_analysis";
-    CommandProcessorResponse response = hcatDriver.run(query);
-    assertEquals(0,response.getResponseCode());
-  }
-
-  @Test
-  public void testCTLPass() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException{
-
-    try{
-      hcatDriver.run("drop table junit_sem_analysis");
+    @Test
+    public void testCreateTblWithLowerCasePartNames() throws CommandNeedRetryException, MetaException, TException, NoSuchObjectException {
+        driver.run("drop table junit_sem_analysis");
+        CommandProcessorResponse resp = driver.run("create table junit_sem_analysis (a int) partitioned by (B string) stored as TEXTFILE");
+        assertEquals(resp.getResponseCode(), 0);
+        assertEquals(null, resp.getErrorMessage());
+        Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+        assertEquals("Partition key name case problem", "b", tbl.getPartitionKeys().get(0).getName());
+        driver.run("drop table junit_sem_analysis");
     }
-    catch( Exception e){
-      LOG.error("Error in drop table.",e);
-    }
-    query =  "create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE";
 
-    hcatDriver.run(query);
-    String likeTbl = "like_table";
-    hcatDriver.run("drop table "+likeTbl);
-    query = "create table like_table like junit_sem_analysis";
-    CommandProcessorResponse resp = hcatDriver.run(query);
-    assertEquals(0, resp.getResponseCode());
+    @Test
+    public void testAlterTblFFpart() throws MetaException, TException, NoSuchObjectException, CommandNeedRetryException {
+
+        driver.run("drop table junit_sem_analysis");
+        driver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as TEXTFILE");
+        driver.run("alter table junit_sem_analysis add partition (b='2010-10-10')");
+        hcatDriver.run("alter table junit_sem_analysis partition (b='2010-10-10') set fileformat RCFILE");
+
+        Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+        assertEquals(TextInputFormat.class.getName(), tbl.getSd().getInputFormat());
+        assertEquals(HiveIgnoreKeyTextOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
+
+        List<String> partVals = new ArrayList<String>(1);
+        partVals.add("2010-10-10");
+        Partition part = client.getPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME, partVals);
+
+        assertEquals(RCFileInputFormat.class.getName(), part.getSd().getInputFormat());
+        assertEquals(RCFileOutputFormat.class.getName(), part.getSd().getOutputFormat());
+
+        hcatDriver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testUsNonExistentDB() throws CommandNeedRetryException {
+        CommandProcessorResponse resp = hcatDriver.run("use no_such_db");
+        assertEquals(1, resp.getResponseCode());
+    }
+
+    @Test
+    public void testDatabaseOperations() throws MetaException, CommandNeedRetryException {
+
+        List<String> dbs = client.getAllDatabases();
+        String testDb1 = "testdatabaseoperatons1";
+        String testDb2 = "testdatabaseoperatons2";
+
+        if (dbs.contains(testDb1.toLowerCase())) {
+            assertEquals(0, hcatDriver.run("drop database " + testDb1).getResponseCode());
+        }
+
+        if (dbs.contains(testDb2.toLowerCase())) {
+            assertEquals(0, hcatDriver.run("drop database " + testDb2).getResponseCode());
+        }
+
+        assertEquals(0, hcatDriver.run("create database " + testDb1).getResponseCode());
+        assertTrue(client.getAllDatabases().contains(testDb1));
+        assertEquals(0, hcatDriver.run("create database if not exists " + testDb1).getResponseCode());
+        assertTrue(client.getAllDatabases().contains(testDb1));
+        assertEquals(0, hcatDriver.run("create database if not exists " + testDb2).getResponseCode());
+        assertTrue(client.getAllDatabases().contains(testDb2));
+
+        assertEquals(0, hcatDriver.run("drop database " + testDb1).getResponseCode());
+        assertEquals(0, hcatDriver.run("drop database " + testDb2).getResponseCode());
+        assertFalse(client.getAllDatabases().contains(testDb1));
+        assertFalse(client.getAllDatabases().contains(testDb2));
+    }
+
+    @Test
+    public void testCreateTableIfNotExists() throws MetaException, TException, NoSuchObjectException, CommandNeedRetryException {
+
+        hcatDriver.run("drop table " + TBL_NAME);
+        hcatDriver.run("create table junit_sem_analysis (a int) stored as RCFILE");
+        Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+        List<FieldSchema> cols = tbl.getSd().getCols();
+        assertEquals(1, cols.size());
+        assertTrue(cols.get(0).equals(new FieldSchema("a", "int", null)));
+        assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
+        assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
+
+        CommandProcessorResponse resp = hcatDriver.run("create table if not exists junit_sem_analysis (a int) stored as RCFILE");
+        assertEquals(0, resp.getResponseCode());
+        assertNull(resp.getErrorMessage());
+        tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+        cols = tbl.getSd().getCols();
+        assertEquals(1, cols.size());
+        assertTrue(cols.get(0).equals(new FieldSchema("a", "int", null)));
+        assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
+        assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
+
+        hcatDriver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testAlterTblTouch() throws CommandNeedRetryException {
+
+        hcatDriver.run("drop table junit_sem_analysis");
+        hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+        CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis touch");
+        assertEquals(0, response.getResponseCode());
+
+        hcatDriver.run("alter table junit_sem_analysis touch partition (b='12')");
+        assertEquals(0, response.getResponseCode());
+
+        hcatDriver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testChangeColumns() throws CommandNeedRetryException {
+        hcatDriver.run("drop table junit_sem_analysis");
+        hcatDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as RCFILE");
+        CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis change a a1 int");
+        assertEquals(0, response.getResponseCode());
+
+        response = hcatDriver.run("alter table junit_sem_analysis change a1 a string");
+        assertEquals(0, response.getResponseCode());
+
+        response = hcatDriver.run("alter table junit_sem_analysis change a a int after c");
+        assertEquals(0, response.getResponseCode());
+        hcatDriver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testAddReplaceCols() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException {
+
+        hcatDriver.run("drop table junit_sem_analysis");
+        hcatDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as RCFILE");
+        CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis replace columns (a1 tinyint)");
+        assertEquals(0, response.getResponseCode());
+
+        response = hcatDriver.run("alter table junit_sem_analysis add columns (d tinyint)");
+        assertEquals(0, response.getResponseCode());
+        assertNull(response.getErrorMessage());
+
+        response = hcatDriver.run("describe extended junit_sem_analysis");
+        assertEquals(0, response.getResponseCode());
+        Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+        List<FieldSchema> cols = tbl.getSd().getCols();
+        assertEquals(2, cols.size());
+        assertTrue(cols.get(0).equals(new FieldSchema("a1", "tinyint", null)));
+        assertTrue(cols.get(1).equals(new FieldSchema("d", "tinyint", null)));
+        hcatDriver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testAlterTblClusteredBy() throws CommandNeedRetryException {
+
+        hcatDriver.run("drop table junit_sem_analysis");
+        hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+        CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis clustered by (a) into 7 buckets");
+        assertEquals(0, response.getResponseCode());
+        hcatDriver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testAlterTableSetFF() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException {
+
+        hcatDriver.run("drop table junit_sem_analysis");
+        hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+
+        Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+        assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
+        assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
+
+        hcatDriver.run("alter table junit_sem_analysis set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " +
+                "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'");
+        hcatDriver.run("desc extended junit_sem_analysis");
+
+        tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+        assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
+        assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
+
+        hcatDriver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testAddPartFail() throws CommandNeedRetryException {
+
+        driver.run("drop table junit_sem_analysis");
+        driver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+        CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis add partition (b='2') location 'README.txt'");
+        assertEquals(0, response.getResponseCode());
+        driver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testAddPartPass() throws IOException, CommandNeedRetryException {
+
+        hcatDriver.run("drop table junit_sem_analysis");
+        hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+        CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis add partition (b='2') location '" + TEST_DATA_DIR + "'");
+        assertEquals(0, response.getResponseCode());
+        assertNull(response.getErrorMessage());
+        hcatDriver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testCTAS() throws CommandNeedRetryException {
+        hcatDriver.run("drop table junit_sem_analysis");
+        query = "create table junit_sem_analysis (a int) as select * from tbl2";
+        CommandProcessorResponse response = hcatDriver.run(query);
+        assertEquals(40000, response.getResponseCode());
+        assertTrue(response.getErrorMessage().contains("FAILED: SemanticException Operation not supported. Create table as Select is not a valid operation."));
+        hcatDriver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testStoredAs() throws CommandNeedRetryException {
+        hcatDriver.run("drop table junit_sem_analysis");
+        query = "create table junit_sem_analysis (a int)";
+        CommandProcessorResponse response = hcatDriver.run(query);
+        assertEquals(0, response.getResponseCode());
+        hcatDriver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testAddDriverInfo() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException {
+
+        hcatDriver.run("drop table junit_sem_analysis");
+        query = "create table junit_sem_analysis (a int) partitioned by (b string)  stored as " +
+                "INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " +
+                "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver' ";
+        assertEquals(0, hcatDriver.run(query).getResponseCode());
+
+        Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+        assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
+        assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
+
+        hcatDriver.run("drop table junit_sem_analysis");
+    }
+
+    @Test
+    public void testInvalidateNonStringPartition() throws IOException, CommandNeedRetryException {
+
+        hcatDriver.run("drop table junit_sem_analysis");
+        query = "create table junit_sem_analysis (a int) partitioned by (b int)  stored as RCFILE";
+
+        CommandProcessorResponse response = hcatDriver.run(query);
+        assertEquals(40000, response.getResponseCode());
+        assertEquals("FAILED: SemanticException Operation not supported. HCatalog only supports partition columns of type string. For column: b Found type: int",
+                response.getErrorMessage());
+
+    }
+
+    @Test
+    public void testInvalidateSeqFileStoredAs() throws IOException, CommandNeedRetryException {
+
+        hcatDriver.run("drop table junit_sem_analysis");
+        query = "create table junit_sem_analysis (a int) partitioned by (b string)  stored as SEQUENCEFILE";
+
+        CommandProcessorResponse response = hcatDriver.run(query);
+        assertEquals(0, response.getResponseCode());
+
+    }
+
+    @Test
+    public void testInvalidateTextFileStoredAs() throws IOException, CommandNeedRetryException {
+
+        hcatDriver.run("drop table junit_sem_analysis");
+        query = "create table junit_sem_analysis (a int) partitioned by (b string)  stored as TEXTFILE";
+
+        CommandProcessorResponse response = hcatDriver.run(query);
+        assertEquals(0, response.getResponseCode());
+
+    }
+
+    @Test
+    public void testInvalidateClusteredBy() throws IOException, CommandNeedRetryException {
+
+        hcatDriver.run("drop table junit_sem_analysis");
+        query = "create table junit_sem_analysis (a int) partitioned by (b string) clustered by (a) into 10 buckets stored as TEXTFILE";
+
+        CommandProcessorResponse response = hcatDriver.run(query);
+        assertEquals(0, response.getResponseCode());
+    }
+
+    @Test
+    public void testCTLFail() throws IOException, CommandNeedRetryException {
+
+        driver.run("drop table junit_sem_analysis");
+        driver.run("drop table like_table");
+        query = "create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE";
+
+        driver.run(query);
+        query = "create table like_table like junit_sem_analysis";
+        CommandProcessorResponse response = hcatDriver.run(query);
+        assertEquals(0, response.getResponseCode());
+    }
+
+    @Test
+    public void testCTLPass() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException {
+
+        try {
+            hcatDriver.run("drop table junit_sem_analysis");
+        } catch (Exception e) {
+            LOG.error("Error in drop table.", e);
+        }
+        query = "create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE";
+
+        hcatDriver.run(query);
+        String likeTbl = "like_table";
+        hcatDriver.run("drop table " + likeTbl);
+        query = "create table like_table like junit_sem_analysis";
+        CommandProcessorResponse resp = hcatDriver.run(query);
+        assertEquals(0, resp.getResponseCode());
 //    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, likeTbl);
 //    assertEquals(likeTbl,tbl.getTableName());
 //    List<FieldSchema> cols = tbl.getSd().getCols();
@@ -399,7 +398,7 @@
 //
 //    hcatDriver.run("drop table junit_sem_analysis");
 //    hcatDriver.run("drop table "+likeTbl);
-  }
+    }
 
 // This test case currently fails, since add partitions don't inherit anything from tables.
 
diff --git a/src/test/org/apache/hcatalog/cli/TestUseDatabase.java b/src/test/org/apache/hcatalog/cli/TestUseDatabase.java
index 403a13f..e97a9c2 100644
--- a/src/test/org/apache/hcatalog/cli/TestUseDatabase.java
+++ b/src/test/org/apache/hcatalog/cli/TestUseDatabase.java
@@ -31,46 +31,46 @@
 import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
 
 /* Unit test for GitHub Howl issue #3 */
-public class TestUseDatabase extends TestCase{
+public class TestUseDatabase extends TestCase {
 
-  private Driver hcatDriver;
+    private Driver hcatDriver;
 
-  @Override
-  protected void setUp() throws Exception {
+    @Override
+    protected void setUp() throws Exception {
 
-    HiveConf hcatConf = new HiveConf(this.getClass());
-    hcatConf.set(ConfVars.PREEXECHOOKS.varname, "");
-    hcatConf.set(ConfVars.POSTEXECHOOKS.varname, "");
-    hcatConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+        HiveConf hcatConf = new HiveConf(this.getClass());
+        hcatConf.set(ConfVars.PREEXECHOOKS.varname, "");
+        hcatConf.set(ConfVars.POSTEXECHOOKS.varname, "");
+        hcatConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
 
-    hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
-    hcatDriver = new Driver(hcatConf);
-    SessionState.start(new CliSessionState(hcatConf));
-  }
+        hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
+        hcatDriver = new Driver(hcatConf);
+        SessionState.start(new CliSessionState(hcatConf));
+    }
 
-  String query;
-  private final String dbName = "testUseDatabase_db";
-  private final String tblName = "testUseDatabase_tbl";
+    String query;
+    private final String dbName = "testUseDatabase_db";
+    private final String tblName = "testUseDatabase_tbl";
 
-  public void testAlterTablePass() throws IOException, CommandNeedRetryException{
+    public void testAlterTablePass() throws IOException, CommandNeedRetryException {
 
-    hcatDriver.run("create database " + dbName);
-    hcatDriver.run("use " + dbName);
-    hcatDriver.run("create table " + tblName + " (a int) partitioned by (b string) stored as RCFILE");
+        hcatDriver.run("create database " + dbName);
+        hcatDriver.run("use " + dbName);
+        hcatDriver.run("create table " + tblName + " (a int) partitioned by (b string) stored as RCFILE");
 
-    CommandProcessorResponse response;
+        CommandProcessorResponse response;
 
-    response = hcatDriver.run("alter table " + tblName + " add partition (b='2') location '/tmp'");
-    assertEquals(0, response.getResponseCode());
-    assertNull(response.getErrorMessage());
+        response = hcatDriver.run("alter table " + tblName + " add partition (b='2') location '/tmp'");
+        assertEquals(0, response.getResponseCode());
+        assertNull(response.getErrorMessage());
 
-    response = hcatDriver.run("alter table " + tblName + " set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " +
-        "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'");
-    assertEquals(0, response.getResponseCode());
-    assertNull(response.getErrorMessage());
+        response = hcatDriver.run("alter table " + tblName + " set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " +
+                "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'");
+        assertEquals(0, response.getResponseCode());
+        assertNull(response.getErrorMessage());
 
-    hcatDriver.run("drop table " + tblName);
-    hcatDriver.run("drop database " + dbName);
-  }
+        hcatDriver.run("drop table " + tblName);
+        hcatDriver.run("drop database " + dbName);
+    }
 
 }
diff --git a/src/test/org/apache/hcatalog/common/TestHCatUtil.java b/src/test/org/apache/hcatalog/common/TestHCatUtil.java
index 29675ce..0adba5a 100644
--- a/src/test/org/apache/hcatalog/common/TestHCatUtil.java
+++ b/src/test/org/apache/hcatalog/common/TestHCatUtil.java
@@ -41,142 +41,142 @@
 
 public class TestHCatUtil {
 
-  @Test
-  public void testFsPermissionOperation(){
+    @Test
+    public void testFsPermissionOperation() {
 
-    HashMap<String,Integer> permsCode = new HashMap<String,Integer>();
+        HashMap<String, Integer> permsCode = new HashMap<String, Integer>();
 
-    for (int i = 0; i < 8; i++){
-      for (int j = 0; j < 8; j++){
-        for (int k = 0; k < 8; k++){
-          StringBuilder sb = new StringBuilder();
-          sb.append("0");
-          sb.append(i);
-          sb.append(j);
-          sb.append(k);
-          Integer code = (((i*8)+j)*8)+k;
-          String perms = (new FsPermission(Short.decode(sb.toString()))).toString();
-          if (permsCode.containsKey(perms)){
-            Assert.assertEquals("permissions(" + perms + ") mapped to multiple codes", code, permsCode.get(perms));
-          }
-          permsCode.put(perms, code);
-          assertFsPermissionTransformationIsGood(perms);
+        for (int i = 0; i < 8; i++) {
+            for (int j = 0; j < 8; j++) {
+                for (int k = 0; k < 8; k++) {
+                    StringBuilder sb = new StringBuilder();
+                    sb.append("0");
+                    sb.append(i);
+                    sb.append(j);
+                    sb.append(k);
+                    Integer code = (((i * 8) + j) * 8) + k;
+                    String perms = (new FsPermission(Short.decode(sb.toString()))).toString();
+                    if (permsCode.containsKey(perms)) {
+                        Assert.assertEquals("permissions(" + perms + ") mapped to multiple codes", code, permsCode.get(perms));
+                    }
+                    permsCode.put(perms, code);
+                    assertFsPermissionTransformationIsGood(perms);
+                }
+            }
         }
-      }
     }
-  }
 
-  private void assertFsPermissionTransformationIsGood(String perms) {
-    Assert.assertEquals(perms, FsPermission.valueOf("-" + perms).toString());
-  }
+    private void assertFsPermissionTransformationIsGood(String perms) {
+        Assert.assertEquals(perms, FsPermission.valueOf("-" + perms).toString());
+    }
 
-  @Test
-  public void testValidateMorePermissive(){
-    assertConsistentFsPermissionBehaviour(FsAction.ALL,true,true,true,true,true,true,true,true);
-    assertConsistentFsPermissionBehaviour(FsAction.READ,false,true,false,true,false,false,false,false);
-    assertConsistentFsPermissionBehaviour(FsAction.WRITE,false,true,false,false,true,false,false,false);
-    assertConsistentFsPermissionBehaviour(FsAction.EXECUTE,false,true,true,false,false,false,false,false);
-    assertConsistentFsPermissionBehaviour(FsAction.READ_EXECUTE,false,true,true,true,false,true,false,false);
-    assertConsistentFsPermissionBehaviour(FsAction.READ_WRITE,false,true,false,true,true,false,true,false);
-    assertConsistentFsPermissionBehaviour(FsAction.WRITE_EXECUTE,false,true,true,false,true,false,false,true);
-    assertConsistentFsPermissionBehaviour(FsAction.NONE,false,true,false,false,false,false,false,false);
-  }
+    @Test
+    public void testValidateMorePermissive() {
+        assertConsistentFsPermissionBehaviour(FsAction.ALL, true, true, true, true, true, true, true, true);
+        assertConsistentFsPermissionBehaviour(FsAction.READ, false, true, false, true, false, false, false, false);
+        assertConsistentFsPermissionBehaviour(FsAction.WRITE, false, true, false, false, true, false, false, false);
+        assertConsistentFsPermissionBehaviour(FsAction.EXECUTE, false, true, true, false, false, false, false, false);
+        assertConsistentFsPermissionBehaviour(FsAction.READ_EXECUTE, false, true, true, true, false, true, false, false);
+        assertConsistentFsPermissionBehaviour(FsAction.READ_WRITE, false, true, false, true, true, false, true, false);
+        assertConsistentFsPermissionBehaviour(FsAction.WRITE_EXECUTE, false, true, true, false, true, false, false, true);
+        assertConsistentFsPermissionBehaviour(FsAction.NONE, false, true, false, false, false, false, false, false);
+    }
 
 
-  private void assertConsistentFsPermissionBehaviour(
-      FsAction base, boolean versusAll, boolean versusNone,
-      boolean versusX, boolean versusR, boolean versusW,
-      boolean versusRX, boolean versusRW,  boolean versusWX){
+    private void assertConsistentFsPermissionBehaviour(
+            FsAction base, boolean versusAll, boolean versusNone,
+            boolean versusX, boolean versusR, boolean versusW,
+            boolean versusRX, boolean versusRW, boolean versusWX) {
 
-    Assert.assertTrue(versusAll == HCatUtil.validateMorePermissive(base, FsAction.ALL));
-    Assert.assertTrue(versusX == HCatUtil.validateMorePermissive(base, FsAction.EXECUTE));
-    Assert.assertTrue(versusNone == HCatUtil.validateMorePermissive(base, FsAction.NONE));
-    Assert.assertTrue(versusR == HCatUtil.validateMorePermissive(base, FsAction.READ));
-    Assert.assertTrue(versusRX == HCatUtil.validateMorePermissive(base, FsAction.READ_EXECUTE));
-    Assert.assertTrue(versusRW == HCatUtil.validateMorePermissive(base, FsAction.READ_WRITE));
-    Assert.assertTrue(versusW == HCatUtil.validateMorePermissive(base, FsAction.WRITE));
-    Assert.assertTrue(versusWX == HCatUtil.validateMorePermissive(base, FsAction.WRITE_EXECUTE));
-  }
+        Assert.assertTrue(versusAll == HCatUtil.validateMorePermissive(base, FsAction.ALL));
+        Assert.assertTrue(versusX == HCatUtil.validateMorePermissive(base, FsAction.EXECUTE));
+        Assert.assertTrue(versusNone == HCatUtil.validateMorePermissive(base, FsAction.NONE));
+        Assert.assertTrue(versusR == HCatUtil.validateMorePermissive(base, FsAction.READ));
+        Assert.assertTrue(versusRX == HCatUtil.validateMorePermissive(base, FsAction.READ_EXECUTE));
+        Assert.assertTrue(versusRW == HCatUtil.validateMorePermissive(base, FsAction.READ_WRITE));
+        Assert.assertTrue(versusW == HCatUtil.validateMorePermissive(base, FsAction.WRITE));
+        Assert.assertTrue(versusWX == HCatUtil.validateMorePermissive(base, FsAction.WRITE_EXECUTE));
+    }
 
-  @Test
-  public void testExecutePermissionsCheck(){
-    Assert.assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.ALL));
-    Assert.assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.NONE));
-    Assert.assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.EXECUTE));
-    Assert.assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.READ_EXECUTE));
-    Assert.assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.WRITE_EXECUTE));
+    @Test
+    public void testExecutePermissionsCheck() {
+        Assert.assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.ALL));
+        Assert.assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.NONE));
+        Assert.assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.EXECUTE));
+        Assert.assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.READ_EXECUTE));
+        Assert.assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.WRITE_EXECUTE));
 
-    Assert.assertFalse(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.READ));
-    Assert.assertFalse(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.WRITE));
-    Assert.assertFalse(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.READ_WRITE));
+        Assert.assertFalse(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.READ));
+        Assert.assertFalse(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.WRITE));
+        Assert.assertFalse(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.READ_WRITE));
 
-  }
+    }
 
-  @Test
-  public void testGetTableSchemaWithPtnColsApi() throws IOException {
-    // Check the schema of a table with one field & no partition keys.
-    StorageDescriptor sd = new StorageDescriptor(
-        Lists.newArrayList(new FieldSchema("username", Constants.STRING_TYPE_NAME, null)),
-        "location", "org.apache.hadoop.mapred.TextInputFormat",
-        "org.apache.hadoop.mapred.TextOutputFormat", false, -1, new SerDeInfo(),
-        new ArrayList<String>(), new ArrayList<Order>(), new HashMap<String, String>());
-    org.apache.hadoop.hive.metastore.api.Table apiTable =
-        new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner",
-            0, 0, 0, sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
-            "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
-    Table table = new Table(apiTable);
+    @Test
+    public void testGetTableSchemaWithPtnColsApi() throws IOException {
+        // Check the schema of a table with one field & no partition keys.
+        StorageDescriptor sd = new StorageDescriptor(
+                Lists.newArrayList(new FieldSchema("username", Constants.STRING_TYPE_NAME, null)),
+                "location", "org.apache.hadoop.mapred.TextInputFormat",
+                "org.apache.hadoop.mapred.TextOutputFormat", false, -1, new SerDeInfo(),
+                new ArrayList<String>(), new ArrayList<Order>(), new HashMap<String, String>());
+        org.apache.hadoop.hive.metastore.api.Table apiTable =
+                new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner",
+                        0, 0, 0, sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
+                        "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
+        Table table = new Table(apiTable);
 
-    List<HCatFieldSchema> expectedHCatSchema =
-        Lists.newArrayList(new HCatFieldSchema("username", HCatFieldSchema.Type.STRING, null));
+        List<HCatFieldSchema> expectedHCatSchema =
+                Lists.newArrayList(new HCatFieldSchema("username", HCatFieldSchema.Type.STRING, null));
 
-    Assert.assertEquals(new HCatSchema(expectedHCatSchema),
-        HCatUtil.getTableSchemaWithPtnCols(table));
+        Assert.assertEquals(new HCatSchema(expectedHCatSchema),
+                HCatUtil.getTableSchemaWithPtnCols(table));
 
-    // Add a partition key & ensure its reflected in the schema.
-    List<FieldSchema> partitionKeys =
-        Lists.newArrayList(new FieldSchema("dt", Constants.STRING_TYPE_NAME, null));
-    table.getTTable().setPartitionKeys(partitionKeys);
-    expectedHCatSchema.add(new HCatFieldSchema("dt", HCatFieldSchema.Type.STRING, null));
-    Assert.assertEquals(new HCatSchema(expectedHCatSchema),
-        HCatUtil.getTableSchemaWithPtnCols(table));
-  }
+        // Add a partition key & ensure it's reflected in the schema.
+        List<FieldSchema> partitionKeys =
+                Lists.newArrayList(new FieldSchema("dt", Constants.STRING_TYPE_NAME, null));
+        table.getTTable().setPartitionKeys(partitionKeys);
+        expectedHCatSchema.add(new HCatFieldSchema("dt", HCatFieldSchema.Type.STRING, null));
+        Assert.assertEquals(new HCatSchema(expectedHCatSchema),
+                HCatUtil.getTableSchemaWithPtnCols(table));
+    }
 
-  /**
-   * Hive represents tables in two ways:
-   * <ul>
-   *   <li>org.apache.hadoop.hive.metastore.api.Table - exactly whats stored in the metastore</li>
-   *   <li>org.apache.hadoop.hive.ql.metadata.Table - adds business logic over api.Table</li>
-   * </ul>
-   * Here we check SerDe-reported fields are included in the table schema.
-   */
-  @Test
-  public void testGetTableSchemaWithPtnColsSerDeReportedFields() throws IOException {
-    Map<String, String> parameters = Maps.newHashMap();
-    parameters.put(Constants.SERIALIZATION_CLASS,
-        "org.apache.hadoop.hive.serde2.thrift.test.IntString");
-    parameters.put(Constants.SERIALIZATION_FORMAT, "org.apache.thrift.protocol.TBinaryProtocol");
+    /**
+     * Hive represents tables in two ways:
+     * <ul>
+     *   <li>org.apache.hadoop.hive.metastore.api.Table - exactly what's stored in the metastore</li>
+     *   <li>org.apache.hadoop.hive.ql.metadata.Table - adds business logic over api.Table</li>
+     * </ul>
+     * Here we check SerDe-reported fields are included in the table schema.
+     */
+    @Test
+    public void testGetTableSchemaWithPtnColsSerDeReportedFields() throws IOException {
+        Map<String, String> parameters = Maps.newHashMap();
+        parameters.put(Constants.SERIALIZATION_CLASS,
+                "org.apache.hadoop.hive.serde2.thrift.test.IntString");
+        parameters.put(Constants.SERIALIZATION_FORMAT, "org.apache.thrift.protocol.TBinaryProtocol");
 
-    SerDeInfo serDeInfo = new SerDeInfo(null,
-        "org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer", parameters);
+        SerDeInfo serDeInfo = new SerDeInfo(null,
+                "org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer", parameters);
 
-    // StorageDescriptor has an empty list of fields - SerDe will report them.
-    StorageDescriptor sd = new StorageDescriptor(new ArrayList<FieldSchema>(), "location",
-        "org.apache.hadoop.mapred.TextInputFormat", "org.apache.hadoop.mapred.TextOutputFormat",
-        false, -1, serDeInfo, new ArrayList<String>(), new ArrayList<Order>(),
-        new HashMap<String, String>());
+        // StorageDescriptor has an empty list of fields - SerDe will report them.
+        StorageDescriptor sd = new StorageDescriptor(new ArrayList<FieldSchema>(), "location",
+                "org.apache.hadoop.mapred.TextInputFormat", "org.apache.hadoop.mapred.TextOutputFormat",
+                false, -1, serDeInfo, new ArrayList<String>(), new ArrayList<Order>(),
+                new HashMap<String, String>());
 
-    org.apache.hadoop.hive.metastore.api.Table apiTable =
-        new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner",
-            0, 0, 0, sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
-            "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
-    Table table = new Table(apiTable);
+        org.apache.hadoop.hive.metastore.api.Table apiTable =
+                new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner",
+                        0, 0, 0, sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
+                        "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
+        Table table = new Table(apiTable);
 
-    List<HCatFieldSchema> expectedHCatSchema = Lists.newArrayList(
-        new HCatFieldSchema("myint", HCatFieldSchema.Type.INT, null),
-        new HCatFieldSchema("mystring", HCatFieldSchema.Type.STRING, null),
-        new HCatFieldSchema("underscore_int", HCatFieldSchema.Type.INT, null));
+        List<HCatFieldSchema> expectedHCatSchema = Lists.newArrayList(
+                new HCatFieldSchema("myint", HCatFieldSchema.Type.INT, null),
+                new HCatFieldSchema("mystring", HCatFieldSchema.Type.STRING, null),
+                new HCatFieldSchema("underscore_int", HCatFieldSchema.Type.INT, null));
 
-    Assert.assertEquals(new HCatSchema(expectedHCatSchema),
-        HCatUtil.getTableSchemaWithPtnCols(table));
-  }
+        Assert.assertEquals(new HCatSchema(expectedHCatSchema),
+                HCatUtil.getTableSchemaWithPtnCols(table));
+    }
 }
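
The TestHCatUtil hunk above exercises HCatUtil.getTableSchemaWithPtnCols: build a metastore-level api.Table, wrap it in the ql-level Table, and the returned HCatSchema lists the data columns followed by the partition keys. The following is a minimal standalone sketch of that call pattern, reusing only constructors and calls visible in the diff; the class name PtnColsSketch and the import paths are assumptions inferred from the package layout, not taken from the patch.

import java.util.ArrayList;
import java.util.HashMap;

import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hcatalog.common.HCatUtil;
import org.apache.hcatalog.data.schema.HCatSchema;

import com.google.common.collect.Lists;

public class PtnColsSketch {
    public static void main(String[] args) throws Exception {
        // One regular column; the partition key is added on the ql-level Table below.
        StorageDescriptor sd = new StorageDescriptor(
                Lists.newArrayList(new FieldSchema("username", Constants.STRING_TYPE_NAME, null)),
                "location", "org.apache.hadoop.mapred.TextInputFormat",
                "org.apache.hadoop.mapred.TextOutputFormat", false, -1, new SerDeInfo(),
                new ArrayList<String>(), new ArrayList<Order>(), new HashMap<String, String>());
        org.apache.hadoop.hive.metastore.api.Table apiTable =
                new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner",
                        0, 0, 0, sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
                        "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
        Table table = new Table(apiTable);
        table.getTTable().setPartitionKeys(
                Lists.newArrayList(new FieldSchema("dt", Constants.STRING_TYPE_NAME, null)));

        // Partition columns come back appended after the data columns.
        HCatSchema schema = HCatUtil.getTableSchemaWithPtnCols(table);
        System.out.println(schema);
    }
}
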
diff --git a/src/test/org/apache/hcatalog/common/TestHiveClientCache.java b/src/test/org/apache/hcatalog/common/TestHiveClientCache.java
index 91c9e38..286f32c 100644
--- a/src/test/org/apache/hcatalog/common/TestHiveClientCache.java
+++ b/src/test/org/apache/hcatalog/common/TestHiveClientCache.java
@@ -34,6 +34,7 @@
 import org.apache.thrift.TException;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -74,13 +75,13 @@
     public void testCacheHit() throws IOException, MetaException, LoginException {
 
         HiveClientCache cache = new HiveClientCache(1000);
-        HiveMetaStoreClient  client = cache.get(hiveConf);
+        HiveMetaStoreClient client = cache.get(hiveConf);
         assertNotNull(client);
         client.close(); // close shouldn't matter
 
         // Setting a non important configuration should return the same client only
         hiveConf.setIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS, 10);
-        HiveMetaStoreClient  client2 = cache.get(hiveConf);
+        HiveMetaStoreClient client2 = cache.get(hiveConf);
         assertNotNull(client2);
         assertEquals(client, client2);
         client2.close();
@@ -89,12 +90,12 @@
     @Test
     public void testCacheMiss() throws IOException, MetaException, LoginException {
         HiveClientCache cache = new HiveClientCache(1000);
-        HiveMetaStoreClient  client = cache.get(hiveConf);
+        HiveMetaStoreClient client = cache.get(hiveConf);
         assertNotNull(client);
 
         // Set different uri as it is one of the criteria deciding whether to return the same client or not
         hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, " "); // URIs are checked for string equivalence, even spaces make them different
-        HiveMetaStoreClient  client2 = cache.get(hiveConf);
+        HiveMetaStoreClient client2 = cache.get(hiveConf);
         assertNotNull(client2);
         assertNotSame(client, client2);
     }
@@ -106,7 +107,7 @@
     @Test
     public void testCacheExpiry() throws IOException, MetaException, LoginException, InterruptedException {
         HiveClientCache cache = new HiveClientCache(1);
-        HiveClientCache.CacheableHiveMetaStoreClient client = (HiveClientCache.CacheableHiveMetaStoreClient)cache.get(hiveConf);
+        HiveClientCache.CacheableHiveMetaStoreClient client = (HiveClientCache.CacheableHiveMetaStoreClient) cache.get(hiveConf);
         assertNotNull(client);
 
         Thread.sleep(2500);
@@ -165,9 +166,9 @@
      */
     @Test
     public void testHMSCBreakability() throws IOException, MetaException, LoginException, TException, AlreadyExistsException,
-        InvalidObjectException, NoSuchObjectException, InterruptedException {
+            InvalidObjectException, NoSuchObjectException, InterruptedException {
         // Setup
-        LocalMetaServer metaServer =  new LocalMetaServer();
+        LocalMetaServer metaServer = new LocalMetaServer();
         metaServer.start();
 
         final HiveClientCache cache = new HiveClientCache(1000);
@@ -253,6 +254,7 @@
         public HiveConf getHiveConf() {
             return hiveConf;
         }
+
         public void shutDown() {
             System.setSecurityManager(securityManager);
         }
diff --git a/src/test/org/apache/hcatalog/data/HCatDataCheckUtil.java b/src/test/org/apache/hcatalog/data/HCatDataCheckUtil.java
index 1973285..dc09fdb 100644
--- a/src/test/org/apache/hcatalog/data/HCatDataCheckUtil.java
+++ b/src/test/org/apache/hcatalog/data/HCatDataCheckUtil.java
@@ -36,78 +36,78 @@
  */
 public class HCatDataCheckUtil {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HCatDataCheckUtil.class);
+    private static final Logger LOG = LoggerFactory.getLogger(HCatDataCheckUtil.class);
 
-  public static Driver instantiateDriver(MiniCluster cluster) {
-    HiveConf hiveConf = new HiveConf(HCatDataCheckUtil.class);
-    for (Entry e : cluster.getProperties().entrySet()){
-      hiveConf.set(e.getKey().toString(), e.getValue().toString());
-    }
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-
-    LOG.debug("Hive conf : {}", hiveConf.getAllProperties());
-    Driver driver = new Driver(hiveConf);
-    SessionState.start(new CliSessionState(hiveConf));
-    return driver;
-  }
-
-  public static void generateDataFile(MiniCluster cluster, String fileName) throws IOException {
-    MiniCluster.deleteFile(cluster, fileName);
-    String[] input = new String[50];
-    for(int i = 0; i < 50; i++) {
-      input[i] = (i % 5) + "\t" + i  + "\t" + "_S" + i + "S_";
-    }
-    MiniCluster.createInputFile(cluster, fileName, input);
-  }
-
-  public static void createTable(Driver driver, String tableName, String createTableArgs)
-      throws CommandNeedRetryException, IOException {
-    String createTable = "create table " + tableName + createTableArgs;
-    int retCode = driver.run(createTable).getResponseCode();
-    if(retCode != 0) {
-      throw new IOException("Failed to create table. ["+createTable+"], return code from hive driver : ["+retCode+"]");
-    }
-  }
-
-  public static void dropTable(Driver driver, String tablename) throws IOException, CommandNeedRetryException{
-    driver.run("drop table if exists "+tablename);
-  }
-
-  public static ArrayList<String> formattedRun(Driver driver, String name, String selectCmd)
-      throws CommandNeedRetryException, IOException {
-    driver.run(selectCmd);
-    ArrayList<String> src_values = new ArrayList<String>();
-    driver.getResults(src_values);
-    LOG.info("{} : {}", name, src_values);
-    return src_values;
-  }
-
-
-  public static boolean recordsEqual(HCatRecord first, HCatRecord second) {
-    return (compareRecords(first,second) == 0);
-  }
-
-  public static int compareRecords(HCatRecord first, HCatRecord second) {
-    return compareRecordContents(first.getAll(), second.getAll());
-  }
-
-  public static int compareRecordContents(List<Object> first, List<Object> second) {
-    int mySz = first.size();
-    int urSz = second.size();
-    if(mySz != urSz) {
-      return mySz - urSz;
-    } else {
-      for (int i = 0; i < first.size(); i++) {
-        int c = DataType.compare(first.get(i), second.get(i));
-        if (c != 0) {
-          return c;
+    public static Driver instantiateDriver(MiniCluster cluster) {
+        HiveConf hiveConf = new HiveConf(HCatDataCheckUtil.class);
+        for (Entry e : cluster.getProperties().entrySet()) {
+            hiveConf.set(e.getKey().toString(), e.getValue().toString());
         }
-      }
-      return 0;
+        hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+
+        LOG.debug("Hive conf : {}", hiveConf.getAllProperties());
+        Driver driver = new Driver(hiveConf);
+        SessionState.start(new CliSessionState(hiveConf));
+        return driver;
     }
-  }
+
+    public static void generateDataFile(MiniCluster cluster, String fileName) throws IOException {
+        MiniCluster.deleteFile(cluster, fileName);
+        String[] input = new String[50];
+        for (int i = 0; i < 50; i++) {
+            input[i] = (i % 5) + "\t" + i + "\t" + "_S" + i + "S_";
+        }
+        MiniCluster.createInputFile(cluster, fileName, input);
+    }
+
+    public static void createTable(Driver driver, String tableName, String createTableArgs)
+        throws CommandNeedRetryException, IOException {
+        String createTable = "create table " + tableName + createTableArgs;
+        int retCode = driver.run(createTable).getResponseCode();
+        if (retCode != 0) {
+            throw new IOException("Failed to create table. [" + createTable + "], return code from hive driver : [" + retCode + "]");
+        }
+    }
+
+    public static void dropTable(Driver driver, String tablename) throws IOException, CommandNeedRetryException {
+        driver.run("drop table if exists " + tablename);
+    }
+
+    public static ArrayList<String> formattedRun(Driver driver, String name, String selectCmd)
+        throws CommandNeedRetryException, IOException {
+        driver.run(selectCmd);
+        ArrayList<String> src_values = new ArrayList<String>();
+        driver.getResults(src_values);
+        LOG.info("{} : {}", name, src_values);
+        return src_values;
+    }
+
+
+    public static boolean recordsEqual(HCatRecord first, HCatRecord second) {
+        return (compareRecords(first, second) == 0);
+    }
+
+    public static int compareRecords(HCatRecord first, HCatRecord second) {
+        return compareRecordContents(first.getAll(), second.getAll());
+    }
+
+    public static int compareRecordContents(List<Object> first, List<Object> second) {
+        int mySz = first.size();
+        int urSz = second.size();
+        if (mySz != urSz) {
+            return mySz - urSz;
+        } else {
+            for (int i = 0; i < first.size(); i++) {
+                int c = DataType.compare(first.get(i), second.get(i));
+                if (c != 0) {
+                    return c;
+                }
+            }
+            return 0;
+        }
+    }
 
 
 }
diff --git a/src/test/org/apache/hcatalog/data/TestDefaultHCatRecord.java b/src/test/org/apache/hcatalog/data/TestDefaultHCatRecord.java
index 4537219..f86eee0 100644
--- a/src/test/org/apache/hcatalog/data/TestDefaultHCatRecord.java
+++ b/src/test/org/apache/hcatalog/data/TestDefaultHCatRecord.java
@@ -39,221 +39,221 @@
 import junit.framework.Assert;
 import junit.framework.TestCase;
 
-public class TestDefaultHCatRecord extends TestCase{
+public class TestDefaultHCatRecord extends TestCase {
 
-  public void testRYW() throws IOException{
+    public void testRYW() throws IOException {
 
-    File f = new File("binary.dat");
-    f.delete();
-    f.createNewFile();
-    f.deleteOnExit();
+        File f = new File("binary.dat");
+        f.delete();
+        f.createNewFile();
+        f.deleteOnExit();
 
-    OutputStream fileOutStream = new FileOutputStream(f);
-    DataOutput outStream = new DataOutputStream(fileOutStream);
+        OutputStream fileOutStream = new FileOutputStream(f);
+        DataOutput outStream = new DataOutputStream(fileOutStream);
 
-    HCatRecord[]  recs = getHCatRecords();
-    for(int i =0; i < recs.length; i++){
-      recs[i].write(outStream);
-    }
-    fileOutStream.flush();
-    fileOutStream.close();
+        HCatRecord[] recs = getHCatRecords();
+        for (int i = 0; i < recs.length; i++) {
+            recs[i].write(outStream);
+        }
+        fileOutStream.flush();
+        fileOutStream.close();
 
-    InputStream fInStream = new FileInputStream(f);
-    DataInput inpStream = new DataInputStream(fInStream);
+        InputStream fInStream = new FileInputStream(f);
+        DataInput inpStream = new DataInputStream(fInStream);
 
-    for(int i =0; i < recs.length; i++){
-      HCatRecord rec = new DefaultHCatRecord();
-      rec.readFields(inpStream);
-      Assert.assertTrue(HCatDataCheckUtil.recordsEqual(recs[i],rec));
+        for (int i = 0; i < recs.length; i++) {
+            HCatRecord rec = new DefaultHCatRecord();
+            rec.readFields(inpStream);
+            Assert.assertTrue(HCatDataCheckUtil.recordsEqual(recs[i], rec));
+        }
+
+        Assert.assertEquals(fInStream.available(), 0);
+        fInStream.close();
+
     }
 
-    Assert.assertEquals(fInStream.available(), 0);
-    fInStream.close();
+    public void testCompareTo() {
+        HCatRecord[] recs = getHCatRecords();
+        Assert.assertTrue(HCatDataCheckUtil.compareRecords(recs[0], recs[1]) == 0);
+        Assert.assertTrue(HCatDataCheckUtil.compareRecords(recs[4], recs[5]) == 0);
+    }
 
-  }
+    public void testEqualsObject() {
 
-  public void testCompareTo() {
-    HCatRecord[] recs = getHCatRecords();
-    Assert.assertTrue(HCatDataCheckUtil.compareRecords(recs[0],recs[1]) == 0);
-    Assert.assertTrue(HCatDataCheckUtil.compareRecords(recs[4],recs[5]) == 0);
-  }
+        HCatRecord[] recs = getHCatRecords();
+        Assert.assertTrue(HCatDataCheckUtil.recordsEqual(recs[0], recs[1]));
+        Assert.assertTrue(HCatDataCheckUtil.recordsEqual(recs[4], recs[5]));
+    }
 
-  public void testEqualsObject() {
+    /**
+     * Test get and set calls with type
+     * @throws HCatException
+     */
+    public void testGetSetByType1() throws HCatException {
+        HCatRecord inpRec = getHCatRecords()[0];
+        HCatRecord newRec = new DefaultHCatRecord(inpRec.size());
+        HCatSchema hsch =
+                HCatSchemaUtils.getHCatSchema(
+                        "a:tinyint,b:smallint,c:int,d:bigint,e:float,f:double,g:boolean,h:string,i:binary,j:string");
 
-    HCatRecord[] recs = getHCatRecords();
-    Assert.assertTrue(HCatDataCheckUtil.recordsEqual(recs[0],recs[1]));
-    Assert.assertTrue(HCatDataCheckUtil.recordsEqual(recs[4],recs[5]));
-  }
 
-  /**
-   * Test get and set calls with type
-   * @throws HCatException
-   */
-  public void testGetSetByType1() throws HCatException{
-    HCatRecord inpRec = getHCatRecords()[0];
-    HCatRecord newRec = new DefaultHCatRecord(inpRec.size());
-    HCatSchema hsch = 
-        HCatSchemaUtils.getHCatSchema(
-            "a:tinyint,b:smallint,c:int,d:bigint,e:float,f:double,g:boolean,h:string,i:binary,j:string");
-    
+        newRec.setByte("a", hsch, inpRec.getByte("a", hsch));
+        newRec.setShort("b", hsch, inpRec.getShort("b", hsch));
+        newRec.setInteger("c", hsch, inpRec.getInteger("c", hsch));
+        newRec.setLong("d", hsch, inpRec.getLong("d", hsch));
+        newRec.setFloat("e", hsch, inpRec.getFloat("e", hsch));
+        newRec.setDouble("f", hsch, inpRec.getDouble("f", hsch));
+        newRec.setBoolean("g", hsch, inpRec.getBoolean("g", hsch));
+        newRec.setString("h", hsch, inpRec.getString("h", hsch));
+        newRec.setByteArray("i", hsch, inpRec.getByteArray("i", hsch));
+        newRec.setString("j", hsch, inpRec.getString("j", hsch));
 
-    newRec.setByte("a", hsch, inpRec.getByte("a", hsch) );
-    newRec.setShort("b", hsch, inpRec.getShort("b", hsch) );
-    newRec.setInteger("c", hsch, inpRec.getInteger("c", hsch) );
-    newRec.setLong("d", hsch, inpRec.getLong("d", hsch) );
-    newRec.setFloat("e", hsch, inpRec.getFloat("e", hsch) );
-    newRec.setDouble("f", hsch, inpRec.getDouble("f", hsch) );
-    newRec.setBoolean("g", hsch, inpRec.getBoolean("g", hsch) );
-    newRec.setString("h", hsch, inpRec.getString("h", hsch) );
-    newRec.setByteArray("i", hsch, inpRec.getByteArray("i", hsch) );
-    newRec.setString("j", hsch, inpRec.getString("j", hsch) );
-    
-    Assert.assertTrue(HCatDataCheckUtil.recordsEqual(newRec,inpRec));
-    
-    
-  }
-  
-  /**
-   * Test get and set calls with type
-   * @throws HCatException
-   */
-  public void testGetSetByType2() throws HCatException{
-    HCatRecord inpRec = getGetSet2InpRec();
-    
-    HCatRecord newRec = new DefaultHCatRecord(inpRec.size());
-    HCatSchema hsch = 
-        HCatSchemaUtils.getHCatSchema("a:binary,b:map<string,string>,c:array<int>,d:struct<i:int>");
-    
+        Assert.assertTrue(HCatDataCheckUtil.recordsEqual(newRec, inpRec));
 
-    newRec.setByteArray("a", hsch, inpRec.getByteArray("a", hsch) );
-    newRec.setMap("b", hsch, inpRec.getMap("b", hsch) );
-    newRec.setList("c", hsch, inpRec.getList("c", hsch) );
-    newRec.setStruct("d", hsch, inpRec.getStruct("d", hsch) );
 
-    Assert.assertTrue(HCatDataCheckUtil.recordsEqual(newRec,inpRec));
-  }
-  
-  
-  private HCatRecord getGetSet2InpRec() {
-    List<Object> rlist = new ArrayList<Object>();
-    
-    rlist.add(new byte[]{1,2,3});
-    
-    Map<Short, String> mapcol = new HashMap<Short, String>(3);
-    mapcol.put(new Short("2"), "hcat is cool");
-    mapcol.put(new Short("3"), "is it?");
-    mapcol.put(new Short("4"), "or is it not?");
-    rlist.add(mapcol);
+    }
 
-    List<Integer> listcol = new ArrayList<Integer>();
-    listcol.add(314);
-    listcol.add(007);
-    rlist.add( listcol);//list
-    rlist.add( listcol);//struct
-    return new DefaultHCatRecord(rlist);
-  }
+    /**
+     * Test get and set calls with type
+     * @throws HCatException
+     */
+    public void testGetSetByType2() throws HCatException {
+        HCatRecord inpRec = getGetSet2InpRec();
 
-  private HCatRecord[] getHCatRecords(){
+        HCatRecord newRec = new DefaultHCatRecord(inpRec.size());
+        HCatSchema hsch =
+                HCatSchemaUtils.getHCatSchema("a:binary,b:map<string,string>,c:array<int>,d:struct<i:int>");
 
-    List<Object> rec_1 = new ArrayList<Object>(8);
-    rec_1.add(new Byte("123"));
-    rec_1.add(new Short("456"));
-    rec_1.add( new Integer(789));
-    rec_1.add( new Long(1000L));
-    rec_1.add( new Float(5.3F));
-    rec_1.add( new Double(5.3D));
-    rec_1.add( new Boolean(true));
-    rec_1.add( new String("hcat and hadoop"));
-    rec_1.add( null);
-    rec_1.add( "null");
 
-    HCatRecord tup_1 = new DefaultHCatRecord(rec_1);
+        newRec.setByteArray("a", hsch, inpRec.getByteArray("a", hsch));
+        newRec.setMap("b", hsch, inpRec.getMap("b", hsch));
+        newRec.setList("c", hsch, inpRec.getList("c", hsch));
+        newRec.setStruct("d", hsch, inpRec.getStruct("d", hsch));
 
-    List<Object> rec_2 = new ArrayList<Object>(8);
-    rec_2.add( new Byte("123"));
-    rec_2.add( new Short("456"));
-    rec_2.add( new Integer(789));
-    rec_2.add( new Long(1000L));
-    rec_2.add( new Float(5.3F));
-    rec_2.add( new Double(5.3D));
-    rec_2.add( new Boolean(true));
-    rec_2.add( new String("hcat and hadoop"));
-    rec_2.add( null);
-    rec_2.add( "null");
-    HCatRecord tup_2 = new DefaultHCatRecord(rec_2);
+        Assert.assertTrue(HCatDataCheckUtil.recordsEqual(newRec, inpRec));
+    }
 
-    List<Object> rec_3 = new ArrayList<Object>(10);
-    rec_3.add(new Byte("123"));
-    rec_3.add(new Short("456"));
-    rec_3.add( new Integer(789));
-    rec_3.add( new Long(1000L));
-    rec_3.add( new Double(5.3D));
-    rec_3.add( new String("hcat and hadoop"));
-    rec_3.add( null);
-    List<Integer> innerList = new ArrayList<Integer>();
-    innerList.add(314);
-    innerList.add(007);
-    rec_3.add( innerList);
-    Map<Short, String> map = new HashMap<Short, String>(3);
-    map.put(new Short("2"), "hcat is cool");
-    map.put(new Short("3"), "is it?");
-    map.put(new Short("4"), "or is it not?");
-    rec_3.add(map);
 
-    HCatRecord tup_3 = new DefaultHCatRecord(rec_3);
+    private HCatRecord getGetSet2InpRec() {
+        List<Object> rlist = new ArrayList<Object>();
 
-    List<Object> rec_4 = new ArrayList<Object>(8);
-    rec_4.add( new Byte("123"));
-    rec_4.add( new Short("456"));
-    rec_4.add( new Integer(789));
-    rec_4.add( new Long(1000L));
-    rec_4.add( new Double(5.3D));
-    rec_4.add( new String("hcat and hadoop"));
-    rec_4.add( null);
-    rec_4.add( "null");
+        rlist.add(new byte[]{1, 2, 3});
 
-    Map<Short, String> map2 = new HashMap<Short, String>(3);
-    map2.put(new Short("2"), "hcat is cool");
-    map2.put(new Short("3"), "is it?");
-    map2.put(new Short("4"), "or is it not?");
-    rec_4.add(map2);
-    List<Integer> innerList2 = new ArrayList<Integer>();
-    innerList2.add(314);
-    innerList2.add(007);
-    rec_4.add( innerList2);
-    HCatRecord tup_4 = new DefaultHCatRecord(rec_4);
+        Map<Short, String> mapcol = new HashMap<Short, String>(3);
+        mapcol.put(new Short("2"), "hcat is cool");
+        mapcol.put(new Short("3"), "is it?");
+        mapcol.put(new Short("4"), "or is it not?");
+        rlist.add(mapcol);
 
-    
-    List<Object> rec_5 = new ArrayList<Object>(3);
-    rec_5.add( getByteArray());
-    rec_5.add( getStruct());
-    rec_5.add( getList());
-    HCatRecord tup_5 = new DefaultHCatRecord(rec_5);
-    
+        List<Integer> listcol = new ArrayList<Integer>();
+        listcol.add(314);
+        listcol.add(007);
+        rlist.add(listcol);//list
+        rlist.add(listcol);//struct
+        return new DefaultHCatRecord(rlist);
+    }
 
-    List<Object> rec_6 = new ArrayList<Object>(3);
-    rec_6.add( getByteArray());
-    rec_6.add( getStruct());
-    rec_6.add( getList());
-    HCatRecord tup_6 = new DefaultHCatRecord(rec_6);
+    private HCatRecord[] getHCatRecords() {
 
-    
-    return  new HCatRecord[]{tup_1,tup_2,tup_3,tup_4,tup_5,tup_6};
+        List<Object> rec_1 = new ArrayList<Object>(8);
+        rec_1.add(new Byte("123"));
+        rec_1.add(new Short("456"));
+        rec_1.add(new Integer(789));
+        rec_1.add(new Long(1000L));
+        rec_1.add(new Float(5.3F));
+        rec_1.add(new Double(5.3D));
+        rec_1.add(new Boolean(true));
+        rec_1.add(new String("hcat and hadoop"));
+        rec_1.add(null);
+        rec_1.add("null");
 
-  }
+        HCatRecord tup_1 = new DefaultHCatRecord(rec_1);
 
-  private Object getList() {
-    return getStruct();
-  }
+        List<Object> rec_2 = new ArrayList<Object>(8);
+        rec_2.add(new Byte("123"));
+        rec_2.add(new Short("456"));
+        rec_2.add(new Integer(789));
+        rec_2.add(new Long(1000L));
+        rec_2.add(new Float(5.3F));
+        rec_2.add(new Double(5.3D));
+        rec_2.add(new Boolean(true));
+        rec_2.add(new String("hcat and hadoop"));
+        rec_2.add(null);
+        rec_2.add("null");
+        HCatRecord tup_2 = new DefaultHCatRecord(rec_2);
 
-  private Object getByteArray() {
-    return new byte[]{1,2,3,4};
-  }
+        List<Object> rec_3 = new ArrayList<Object>(10);
+        rec_3.add(new Byte("123"));
+        rec_3.add(new Short("456"));
+        rec_3.add(new Integer(789));
+        rec_3.add(new Long(1000L));
+        rec_3.add(new Double(5.3D));
+        rec_3.add(new String("hcat and hadoop"));
+        rec_3.add(null);
+        List<Integer> innerList = new ArrayList<Integer>();
+        innerList.add(314);
+        innerList.add(007);
+        rec_3.add(innerList);
+        Map<Short, String> map = new HashMap<Short, String>(3);
+        map.put(new Short("2"), "hcat is cool");
+        map.put(new Short("3"), "is it?");
+        map.put(new Short("4"), "or is it not?");
+        rec_3.add(map);
 
-  private List<?> getStruct() {
-    List<Object> struct = new ArrayList<Object>();
-    struct.add(new Integer(1));
-    struct.add(new String("x"));
-    return struct;
-  }
+        HCatRecord tup_3 = new DefaultHCatRecord(rec_3);
+
+        List<Object> rec_4 = new ArrayList<Object>(8);
+        rec_4.add(new Byte("123"));
+        rec_4.add(new Short("456"));
+        rec_4.add(new Integer(789));
+        rec_4.add(new Long(1000L));
+        rec_4.add(new Double(5.3D));
+        rec_4.add(new String("hcat and hadoop"));
+        rec_4.add(null);
+        rec_4.add("null");
+
+        Map<Short, String> map2 = new HashMap<Short, String>(3);
+        map2.put(new Short("2"), "hcat is cool");
+        map2.put(new Short("3"), "is it?");
+        map2.put(new Short("4"), "or is it not?");
+        rec_4.add(map2);
+        List<Integer> innerList2 = new ArrayList<Integer>();
+        innerList2.add(314);
+        innerList2.add(007);
+        rec_4.add(innerList2);
+        HCatRecord tup_4 = new DefaultHCatRecord(rec_4);
+
+
+        List<Object> rec_5 = new ArrayList<Object>(3);
+        rec_5.add(getByteArray());
+        rec_5.add(getStruct());
+        rec_5.add(getList());
+        HCatRecord tup_5 = new DefaultHCatRecord(rec_5);
+
+
+        List<Object> rec_6 = new ArrayList<Object>(3);
+        rec_6.add(getByteArray());
+        rec_6.add(getStruct());
+        rec_6.add(getList());
+        HCatRecord tup_6 = new DefaultHCatRecord(rec_6);
+
+
+        return new HCatRecord[]{tup_1, tup_2, tup_3, tup_4, tup_5, tup_6};
+
+    }
+
+    private Object getList() {
+        return getStruct();
+    }
+
+    private Object getByteArray() {
+        return new byte[]{1, 2, 3, 4};
+    }
+
+    private List<?> getStruct() {
+        List<Object> struct = new ArrayList<Object>();
+        struct.add(new Integer(1));
+        struct.add(new String("x"));
+        return struct;
+    }
 }
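
The testRYW hunk above checks that a DefaultHCatRecord survives a Writable round trip (write to a DataOutput, read back with readFields). A compact sketch of the same pattern in isolation, using an in-memory buffer instead of a file; the class name RecordRoundTripSketch and the sample field values are illustrative, and the comparison reuses the HCatDataCheckUtil test helper shown earlier in this patch.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.util.ArrayList;
import java.util.List;

import org.apache.hcatalog.data.DefaultHCatRecord;
import org.apache.hcatalog.data.HCatDataCheckUtil;
import org.apache.hcatalog.data.HCatRecord;

public class RecordRoundTripSketch {
    public static void main(String[] args) throws Exception {
        List<Object> fields = new ArrayList<Object>();
        fields.add(Integer.valueOf(789));
        fields.add("hcat and hadoop");
        HCatRecord original = new DefaultHCatRecord(fields);

        // Serialize via Writable.write into an in-memory buffer.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // Read it back with readFields and compare with the test helper,
        // which delegates to DataType.compare field by field.
        HCatRecord copy = new DefaultHCatRecord();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(HCatDataCheckUtil.recordsEqual(original, copy));
    }
}
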
diff --git a/src/test/org/apache/hcatalog/data/TestHCatRecordSerDe.java b/src/test/org/apache/hcatalog/data/TestHCatRecordSerDe.java
index 21ab32b..5f102e9 100644
--- a/src/test/org/apache/hcatalog/data/TestHCatRecordSerDe.java
+++ b/src/test/org/apache/hcatalog/data/TestHCatRecordSerDe.java
@@ -34,135 +34,135 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TestHCatRecordSerDe extends TestCase{
+public class TestHCatRecordSerDe extends TestCase {
 
     private static final Logger LOG = LoggerFactory.getLogger(TestHCatRecordSerDe.class);
 
-  public Map<Properties,HCatRecord> getData(){
-    Map<Properties,HCatRecord> data = new HashMap<Properties,HCatRecord>();
+    public Map<Properties, HCatRecord> getData() {
+        Map<Properties, HCatRecord> data = new HashMap<Properties, HCatRecord>();
 
-    List<Object> rlist = new ArrayList<Object>(11);
-    rlist.add(new Byte("123"));
-    rlist.add(new Short("456"));
-    rlist.add(new Integer(789));
-    rlist.add(new Long(1000L));
-    rlist.add(new Double(5.3D));
-    rlist.add(new Float(2.39F));
-    rlist.add(new String("hcat and hadoop"));
-    rlist.add(null);
+        List<Object> rlist = new ArrayList<Object>(11);
+        rlist.add(new Byte("123"));
+        rlist.add(new Short("456"));
+        rlist.add(new Integer(789));
+        rlist.add(new Long(1000L));
+        rlist.add(new Double(5.3D));
+        rlist.add(new Float(2.39F));
+        rlist.add(new String("hcat and hadoop"));
+        rlist.add(null);
 
-    List<Object> innerStruct = new ArrayList<Object>(2);
-    innerStruct.add(new String("abc"));
-    innerStruct.add(new String("def"));
-    rlist.add(innerStruct);
+        List<Object> innerStruct = new ArrayList<Object>(2);
+        innerStruct.add(new String("abc"));
+        innerStruct.add(new String("def"));
+        rlist.add(innerStruct);
 
-    List<Integer> innerList = new ArrayList<Integer>();
-    innerList.add(314);
-    innerList.add(007);
-    rlist.add(innerList);
+        List<Integer> innerList = new ArrayList<Integer>();
+        innerList.add(314);
+        innerList.add(007);
+        rlist.add(innerList);
 
-    Map<Short, String> map = new HashMap<Short, String>(3);
-    map.put(new Short("2"), "hcat is cool");
-    map.put(new Short("3"), "is it?");
-    map.put(new Short("4"), "or is it not?");
-    rlist.add(map);
+        Map<Short, String> map = new HashMap<Short, String>(3);
+        map.put(new Short("2"), "hcat is cool");
+        map.put(new Short("3"), "is it?");
+        map.put(new Short("4"), "or is it not?");
+        rlist.add(map);
 
-    rlist.add(new Boolean(true));
+        rlist.add(new Boolean(true));
 
-    List<Object> c1 = new ArrayList<Object>();
-      List<Object> c1_1 = new ArrayList<Object>();
-      c1_1.add(new Integer(12));
+        List<Object> c1 = new ArrayList<Object>();
+        List<Object> c1_1 = new ArrayList<Object>();
+        c1_1.add(new Integer(12));
         List<Object> i2 = new ArrayList<Object>();
-          List<Integer> ii1 = new ArrayList<Integer>();
-            ii1.add(new Integer(13));
-            ii1.add(new Integer(14));
-          i2.add(ii1);
-          Map<String,List<?>> ii2 = new HashMap<String,List<?>>();
-            List<Integer> iii1 = new ArrayList<Integer>();
-              iii1.add(new Integer(15));
-            ii2.put("phew", iii1);
-          i2.add(ii2);
-      c1_1.add(i2);
-      c1.add(c1_1);
-    rlist.add(c1);
-    List<Object> am = new ArrayList<Object>();
-      Map<String,String> am_1 = new HashMap<String,String>();
-        am_1.put("noo","haha");
-      am.add(am_1);
-    rlist.add(am);
-    List<Object> aa = new ArrayList<Object>();
-      List<String> aa_1 = new ArrayList<String>();
+        List<Integer> ii1 = new ArrayList<Integer>();
+        ii1.add(new Integer(13));
+        ii1.add(new Integer(14));
+        i2.add(ii1);
+        Map<String, List<?>> ii2 = new HashMap<String, List<?>>();
+        List<Integer> iii1 = new ArrayList<Integer>();
+        iii1.add(new Integer(15));
+        ii2.put("phew", iii1);
+        i2.add(ii2);
+        c1_1.add(i2);
+        c1.add(c1_1);
+        rlist.add(c1);
+        List<Object> am = new ArrayList<Object>();
+        Map<String, String> am_1 = new HashMap<String, String>();
+        am_1.put("noo", "haha");
+        am.add(am_1);
+        rlist.add(am);
+        List<Object> aa = new ArrayList<Object>();
+        List<String> aa_1 = new ArrayList<String>();
         aa_1.add("bloo");
         aa_1.add("bwahaha");
-      aa.add(aa_1);
-    rlist.add(aa);
+        aa.add(aa_1);
+        rlist.add(aa);
 
-    String typeString =
-        "tinyint,smallint,int,bigint,double,float,string,string,"
-        + "struct<a:string,b:string>,array<int>,map<smallint,string>,boolean,"
-        + "array<struct<i1:int,i2:struct<ii1:array<int>,ii2:map<string,struct<iii1:int>>>>>,"
-        + "array<map<string,string>>,array<array<string>>";
-    Properties props = new Properties();
+        String typeString =
+                "tinyint,smallint,int,bigint,double,float,string,string,"
+                        + "struct<a:string,b:string>,array<int>,map<smallint,string>,boolean,"
+                        + "array<struct<i1:int,i2:struct<ii1:array<int>,ii2:map<string,struct<iii1:int>>>>>,"
+                        + "array<map<string,string>>,array<array<string>>";
+        Properties props = new Properties();
 
-    props.put(Constants.LIST_COLUMNS, "ti,si,i,bi,d,f,s,n,r,l,m,b,c1,am,aa");
-    props.put(Constants.LIST_COLUMN_TYPES, typeString);
+        props.put(Constants.LIST_COLUMNS, "ti,si,i,bi,d,f,s,n,r,l,m,b,c1,am,aa");
+        props.put(Constants.LIST_COLUMN_TYPES, typeString);
 //    props.put(Constants.SERIALIZATION_NULL_FORMAT, "\\N");
 //    props.put(Constants.SERIALIZATION_FORMAT, "1");
 
-    data.put(props, new DefaultHCatRecord(rlist));
-    return data;
-  }
-
-  public void testRW() throws Exception {
-
-    Configuration conf = new Configuration();
-
-    for (Entry<Properties,HCatRecord> e : getData().entrySet()){
-      Properties tblProps = e.getKey();
-      HCatRecord r = e.getValue();
-
-      HCatRecordSerDe hrsd = new HCatRecordSerDe();
-      hrsd.initialize(conf, tblProps);
-
-      LOG.info("ORIG: {}", r);
-
-      Writable s = hrsd.serialize(r,hrsd.getObjectInspector());
-      LOG.info("ONE: {}", s);
-
-      HCatRecord r2 = (HCatRecord) hrsd.deserialize(s);
-      Assert.assertTrue(HCatDataCheckUtil.recordsEqual(r,r2));
-
-      // If it went through correctly, then s is also a HCatRecord,
-      // and also equal to the above, and a deepcopy, and this holds
-      // through for multiple levels more of serialization as well.
-
-      Writable s2 = hrsd.serialize(s, hrsd.getObjectInspector());
-      LOG.info("TWO: {}", s2);
-      Assert.assertTrue(HCatDataCheckUtil.recordsEqual(r,(HCatRecord)s));
-      Assert.assertTrue(HCatDataCheckUtil.recordsEqual(r,(HCatRecord)s2));
-
-      // serialize using another serde, and read out that object repr.
-      LazySimpleSerDe testSD = new LazySimpleSerDe();
-      testSD.initialize(conf, tblProps);
-
-      Writable s3 = testSD.serialize(s, hrsd.getObjectInspector());
-      LOG.info("THREE: {}",s3);
-      Object o3 = testSD.deserialize(s3);
-      Assert.assertFalse(r.getClass().equals(o3.getClass()));
-
-      // then serialize again using hrsd, and compare results
-      HCatRecord s4 = (HCatRecord) hrsd.serialize(o3, testSD.getObjectInspector());
-      LOG.info("FOUR: {}", s4);
-
-      // Test LazyHCatRecord init and read
-      LazyHCatRecord s5 = new LazyHCatRecord(o3,testSD.getObjectInspector());
-      LOG.info("FIVE: {}",s5);
-
-      LazyHCatRecord s6 = new LazyHCatRecord(s4,hrsd.getObjectInspector());
-      LOG.info("SIX: {}", s6);
-
+        data.put(props, new DefaultHCatRecord(rlist));
+        return data;
     }
 
-  }
+    public void testRW() throws Exception {
+
+        Configuration conf = new Configuration();
+
+        for (Entry<Properties, HCatRecord> e : getData().entrySet()) {
+            Properties tblProps = e.getKey();
+            HCatRecord r = e.getValue();
+
+            HCatRecordSerDe hrsd = new HCatRecordSerDe();
+            hrsd.initialize(conf, tblProps);
+
+            LOG.info("ORIG: {}", r);
+
+            Writable s = hrsd.serialize(r, hrsd.getObjectInspector());
+            LOG.info("ONE: {}", s);
+
+            HCatRecord r2 = (HCatRecord) hrsd.deserialize(s);
+            Assert.assertTrue(HCatDataCheckUtil.recordsEqual(r, r2));
+
+            // If serialization went through correctly, then s is also an HCatRecord,
+            // equal to the original and a deep copy of it, and this holds
+            // through multiple further levels of serialization as well.
+
+            Writable s2 = hrsd.serialize(s, hrsd.getObjectInspector());
+            LOG.info("TWO: {}", s2);
+            Assert.assertTrue(HCatDataCheckUtil.recordsEqual(r, (HCatRecord) s));
+            Assert.assertTrue(HCatDataCheckUtil.recordsEqual(r, (HCatRecord) s2));
+
+            // serialize using another serde, and read out that object repr.
+            LazySimpleSerDe testSD = new LazySimpleSerDe();
+            testSD.initialize(conf, tblProps);
+
+            Writable s3 = testSD.serialize(s, hrsd.getObjectInspector());
+            LOG.info("THREE: {}", s3);
+            Object o3 = testSD.deserialize(s3);
+            Assert.assertFalse(r.getClass().equals(o3.getClass()));
+
+            // then serialize again using hrsd, and compare results
+            HCatRecord s4 = (HCatRecord) hrsd.serialize(o3, testSD.getObjectInspector());
+            LOG.info("FOUR: {}", s4);
+
+            // Test LazyHCatRecord init and read
+            LazyHCatRecord s5 = new LazyHCatRecord(o3, testSD.getObjectInspector());
+            LOG.info("FIVE: {}", s5);
+
+            LazyHCatRecord s6 = new LazyHCatRecord(s4, hrsd.getObjectInspector());
+            LOG.info("SIX: {}", s6);
+
+        }
+
+    }
 
 }
diff --git a/src/test/org/apache/hcatalog/data/TestJsonSerDe.java b/src/test/org/apache/hcatalog/data/TestJsonSerDe.java
index 053657c..5e363df 100644
--- a/src/test/org/apache/hcatalog/data/TestJsonSerDe.java
+++ b/src/test/org/apache/hcatalog/data/TestJsonSerDe.java
@@ -32,182 +32,182 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TestJsonSerDe extends TestCase{
+public class TestJsonSerDe extends TestCase {
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestJsonSerDe.class);
+    private static final Logger LOG = LoggerFactory.getLogger(TestJsonSerDe.class);
 
-  public List<Pair<Properties,HCatRecord>> getData(){
-    List<Pair<Properties,HCatRecord>> data = new ArrayList<Pair<Properties,HCatRecord>>();
+    public List<Pair<Properties, HCatRecord>> getData() {
+        List<Pair<Properties, HCatRecord>> data = new ArrayList<Pair<Properties, HCatRecord>>();
 
-    List<Object> rlist = new ArrayList<Object>(13);
-    rlist.add(new Byte("123"));
-    rlist.add(new Short("456"));
-    rlist.add(new Integer(789));
-    rlist.add(new Long(1000L));
-    rlist.add(new Double(5.3D));
-    rlist.add(new Float(2.39F));
-    rlist.add(new String("hcat and hadoop"));
-    rlist.add(null);
+        List<Object> rlist = new ArrayList<Object>(13);
+        rlist.add(new Byte("123"));
+        rlist.add(new Short("456"));
+        rlist.add(new Integer(789));
+        rlist.add(new Long(1000L));
+        rlist.add(new Double(5.3D));
+        rlist.add(new Float(2.39F));
+        rlist.add(new String("hcat and hadoop"));
+        rlist.add(null);
 
-    List<Object> innerStruct = new ArrayList<Object>(2);
-    innerStruct.add(new String("abc"));
-    innerStruct.add(new String("def"));
-    rlist.add(innerStruct);
+        List<Object> innerStruct = new ArrayList<Object>(2);
+        innerStruct.add(new String("abc"));
+        innerStruct.add(new String("def"));
+        rlist.add(innerStruct);
 
-    List<Integer> innerList = new ArrayList<Integer>();
-    innerList.add(314);
-    innerList.add(007);
-    rlist.add(innerList);
+        List<Integer> innerList = new ArrayList<Integer>();
+        innerList.add(314);
+        innerList.add(007);
+        rlist.add(innerList);
 
-    Map<Short, String> map = new HashMap<Short, String>(3);
-    map.put(new Short("2"), "hcat is cool");
-    map.put(new Short("3"), "is it?");
-    map.put(new Short("4"), "or is it not?");
-    rlist.add(map);
+        Map<Short, String> map = new HashMap<Short, String>(3);
+        map.put(new Short("2"), "hcat is cool");
+        map.put(new Short("3"), "is it?");
+        map.put(new Short("4"), "or is it not?");
+        rlist.add(map);
 
-    rlist.add(new Boolean(true));
+        rlist.add(new Boolean(true));
 
-    List<Object> c1 = new ArrayList<Object>();
-      List<Object> c1_1 = new ArrayList<Object>();
-      c1_1.add(new Integer(12));
+        List<Object> c1 = new ArrayList<Object>();
+        List<Object> c1_1 = new ArrayList<Object>();
+        c1_1.add(new Integer(12));
         List<Object> i2 = new ArrayList<Object>();
-          List<Integer> ii1 = new ArrayList<Integer>();
-            ii1.add(new Integer(13));
-            ii1.add(new Integer(14));
-          i2.add(ii1);
-          Map<String,List<?>> ii2 = new HashMap<String,List<?>>();
-            List<Integer> iii1 = new ArrayList<Integer>();
-              iii1.add(new Integer(15));
-            ii2.put("phew", iii1);
-          i2.add(ii2);
-      c1_1.add(i2);
-      c1.add(c1_1);
-    rlist.add(c1);
+        List<Integer> ii1 = new ArrayList<Integer>();
+        ii1.add(new Integer(13));
+        ii1.add(new Integer(14));
+        i2.add(ii1);
+        Map<String, List<?>> ii2 = new HashMap<String, List<?>>();
+        List<Integer> iii1 = new ArrayList<Integer>();
+        iii1.add(new Integer(15));
+        ii2.put("phew", iii1);
+        i2.add(ii2);
+        c1_1.add(i2);
+        c1.add(c1_1);
+        rlist.add(c1);
 
-    List<Object> nlist = new ArrayList<Object>(13);
-    nlist.add(null); // tinyint
-    nlist.add(null); // smallint
-    nlist.add(null); // int
-    nlist.add(null); // bigint
-    nlist.add(null); // double
-    nlist.add(null); // float
-    nlist.add(null); // string
-    nlist.add(null); // string
-    nlist.add(null); // struct
-    nlist.add(null); // array
-    nlist.add(null); // map
-    nlist.add(null); // bool
-    nlist.add(null); // complex
+        List<Object> nlist = new ArrayList<Object>(13);
+        nlist.add(null); // tinyint
+        nlist.add(null); // smallint
+        nlist.add(null); // int
+        nlist.add(null); // bigint
+        nlist.add(null); // double
+        nlist.add(null); // float
+        nlist.add(null); // string
+        nlist.add(null); // string
+        nlist.add(null); // struct
+        nlist.add(null); // array
+        nlist.add(null); // map
+        nlist.add(null); // bool
+        nlist.add(null); // complex
 
-    String typeString =
-        "tinyint,smallint,int,bigint,double,float,string,string,"
-        + "struct<a:string,b:string>,array<int>,map<smallint,string>,boolean,"
-        + "array<struct<i1:int,i2:struct<ii1:array<int>,ii2:map<string,struct<iii1:int>>>>>";
-    Properties props = new Properties();
+        String typeString =
+                "tinyint,smallint,int,bigint,double,float,string,string,"
+                        + "struct<a:string,b:string>,array<int>,map<smallint,string>,boolean,"
+                        + "array<struct<i1:int,i2:struct<ii1:array<int>,ii2:map<string,struct<iii1:int>>>>>";
+        Properties props = new Properties();
 
-    props.put(Constants.LIST_COLUMNS, "ti,si,i,bi,d,f,s,n,r,l,m,b,c1");
-    props.put(Constants.LIST_COLUMN_TYPES, typeString);
+        props.put(Constants.LIST_COLUMNS, "ti,si,i,bi,d,f,s,n,r,l,m,b,c1");
+        props.put(Constants.LIST_COLUMN_TYPES, typeString);
 //    props.put(Constants.SERIALIZATION_NULL_FORMAT, "\\N");
 //    props.put(Constants.SERIALIZATION_FORMAT, "1");
 
-    data.add(new Pair(props, new DefaultHCatRecord(rlist)));
-    data.add(new Pair(props, new DefaultHCatRecord(nlist)));
-    return data;
-  }
-
-  public void testRW() throws Exception {
-
-    Configuration conf = new Configuration();
-
-    for (Pair<Properties,HCatRecord> e : getData()){
-      Properties tblProps = e.first;
-      HCatRecord r = e.second;
-
-      HCatRecordSerDe hrsd = new HCatRecordSerDe();
-      hrsd.initialize(conf, tblProps);
-
-      JsonSerDe jsde = new JsonSerDe();
-      jsde.initialize(conf, tblProps);
-
-      LOG.info("ORIG:{}",r);
-
-      Writable s = hrsd.serialize(r,hrsd.getObjectInspector());
-      LOG.info("ONE:{}",s);
-
-      Object o1 = hrsd.deserialize(s);
-      assertTrue(HCatDataCheckUtil.recordsEqual(r, (HCatRecord) o1));
-
-      Writable s2 = jsde.serialize(o1, hrsd.getObjectInspector());
-      LOG.info("TWO:{}",s2);
-      Object o2 = jsde.deserialize(s2);
-      LOG.info("deserialized TWO : {} ", o2);
-
-      assertTrue(HCatDataCheckUtil.recordsEqual(r, (HCatRecord) o2));
+        data.add(new Pair(props, new DefaultHCatRecord(rlist)));
+        data.add(new Pair(props, new DefaultHCatRecord(nlist)));
+        return data;
     }
 
-  }
+    public void testRW() throws Exception {
 
-  public void testRobustRead() throws Exception {
-    /**
-     *  This test has been added to account for HCATALOG-436
-     *  We write out columns with "internal column names" such
-     *  as "_col0", but try to read with retular column names.
-     */
-    
-    Configuration conf = new Configuration();
+        Configuration conf = new Configuration();
 
-    for (Pair<Properties,HCatRecord> e : getData()){
-      Properties tblProps = e.first;
-      HCatRecord r = e.second;
-      
-      Properties internalTblProps = new Properties();
-      for (Map.Entry pe : tblProps.entrySet()){
-        if (!pe.getKey().equals(Constants.LIST_COLUMNS)){
-          internalTblProps.put(pe.getKey(), pe.getValue());
-        } else {
-          internalTblProps.put(pe.getKey(),getInternalNames((String) pe.getValue()));
+        for (Pair<Properties, HCatRecord> e : getData()) {
+            Properties tblProps = e.first;
+            HCatRecord r = e.second;
+
+            HCatRecordSerDe hrsd = new HCatRecordSerDe();
+            hrsd.initialize(conf, tblProps);
+
+            JsonSerDe jsde = new JsonSerDe();
+            jsde.initialize(conf, tblProps);
+
+            LOG.info("ORIG:{}", r);
+
+            Writable s = hrsd.serialize(r, hrsd.getObjectInspector());
+            LOG.info("ONE:{}", s);
+
+            Object o1 = hrsd.deserialize(s);
+            assertTrue(HCatDataCheckUtil.recordsEqual(r, (HCatRecord) o1));
+
+            Writable s2 = jsde.serialize(o1, hrsd.getObjectInspector());
+            LOG.info("TWO:{}", s2);
+            Object o2 = jsde.deserialize(s2);
+            LOG.info("deserialized TWO : {} ", o2);
+
+            assertTrue(HCatDataCheckUtil.recordsEqual(r, (HCatRecord) o2));
         }
-      }
-      
-      LOG.info("orig tbl props:{}",tblProps);
-      LOG.info("modif tbl props:{}",internalTblProps);
 
-      JsonSerDe wjsd = new JsonSerDe();
-      wjsd.initialize(conf, internalTblProps);
-
-      JsonSerDe rjsd = new JsonSerDe();
-      rjsd.initialize(conf, tblProps);
-
-      LOG.info("ORIG:{}",r);
-
-      Writable s = wjsd.serialize(r,wjsd.getObjectInspector());
-      LOG.info("ONE:{}",s);
-
-      Object o1 = wjsd.deserialize(s);
-      LOG.info("deserialized ONE : {} ", o1);
-
-      Object o2 = rjsd.deserialize(s);
-      LOG.info("deserialized TWO : {} ", o2);
-      assertTrue(HCatDataCheckUtil.recordsEqual(r, (HCatRecord) o2));
     }
-    
-  }
-  
-  String getInternalNames(String columnNames){
-    if (columnNames == null) { 
-      return null; 
+
+    public void testRobustRead() throws Exception {
+        /**
+         *  This test has been added to account for HCATALOG-436
+         *  We write out columns with "internal column names" such
+         *  as "_col0", but try to read with regular column names.
+         */
+
+        Configuration conf = new Configuration();
+
+        for (Pair<Properties, HCatRecord> e : getData()) {
+            Properties tblProps = e.first;
+            HCatRecord r = e.second;
+
+            Properties internalTblProps = new Properties();
+            for (Map.Entry pe : tblProps.entrySet()) {
+                if (!pe.getKey().equals(Constants.LIST_COLUMNS)) {
+                    internalTblProps.put(pe.getKey(), pe.getValue());
+                } else {
+                    internalTblProps.put(pe.getKey(), getInternalNames((String) pe.getValue()));
+                }
+            }
+
+            LOG.info("orig tbl props:{}", tblProps);
+            LOG.info("modif tbl props:{}", internalTblProps);
+
+            JsonSerDe wjsd = new JsonSerDe();
+            wjsd.initialize(conf, internalTblProps);
+
+            JsonSerDe rjsd = new JsonSerDe();
+            rjsd.initialize(conf, tblProps);
+
+            LOG.info("ORIG:{}", r);
+
+            Writable s = wjsd.serialize(r, wjsd.getObjectInspector());
+            LOG.info("ONE:{}", s);
+
+            Object o1 = wjsd.deserialize(s);
+            LOG.info("deserialized ONE : {} ", o1);
+
+            Object o2 = rjsd.deserialize(s);
+            LOG.info("deserialized TWO : {} ", o2);
+            assertTrue(HCatDataCheckUtil.recordsEqual(r, (HCatRecord) o2));
+        }
+
     }
-    if (columnNames.isEmpty()) { 
-      return ""; 
+
+    String getInternalNames(String columnNames) {
+        if (columnNames == null) {
+            return null;
+        }
+        if (columnNames.isEmpty()) {
+            return "";
+        }
+
+        StringBuffer sb = new StringBuffer();
+        int numStrings = columnNames.split(",").length;
+        sb.append("_col0");
+        for (int i = 1; i < numStrings; i++) {
+            sb.append(",");
+            sb.append(HiveConf.getColumnInternalName(i));
+        }
+        return sb.toString();
     }
-    
-    StringBuffer sb = new StringBuffer();
-    int numStrings = columnNames.split(",").length;
-    sb.append("_col0");
-    for (int i = 1; i < numStrings ; i++ ){
-      sb.append(",");
-      sb.append(HiveConf.getColumnInternalName(i));
-    }
-    return sb.toString();
-  }
 }
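
The testRobustRead case above exercises the HCATALOG-436 scenario by rewriting the LIST_COLUMNS property into Hive's internal column names before handing it to the writing serde. Below is a minimal standalone sketch (not part of the patch) of what getInternalNames() produces, assuming only that HiveConf.getColumnInternalName(i) yields "_col" + i, which is the behavior the test relies on:

    import org.apache.hadoop.hive.conf.HiveConf;

    // Sketch: map the column list used in the test to Hive's internal names,
    // the same way getInternalNames() above does it.
    public class InternalNameSketch {
        public static void main(String[] args) {
            String columnNames = "ti,si,i,bi,d,f,s,n,r,l,m,b,c1";
            int numStrings = columnNames.split(",").length;
            StringBuilder sb = new StringBuilder("_col0");
            for (int i = 1; i < numStrings; i++) {
                sb.append(",").append(HiveConf.getColumnInternalName(i)); // "_col" + i
            }
            System.out.println(sb); // _col0,_col1, ... ,_col12
        }
    }

This is the LIST_COLUMNS value the writing JsonSerDe is initialized with in testRobustRead, while the reading JsonSerDe keeps the regular column names.
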
diff --git a/src/test/org/apache/hcatalog/data/TestLazyHCatRecord.java b/src/test/org/apache/hcatalog/data/TestLazyHCatRecord.java
index 9b5f7e5..b532df9 100644
--- a/src/test/org/apache/hcatalog/data/TestLazyHCatRecord.java
+++ b/src/test/org/apache/hcatalog/data/TestLazyHCatRecord.java
@@ -31,165 +31,162 @@
 
 public class TestLazyHCatRecord {
 
-  private final int INT_CONST = 789;
-  private final long LONG_CONST = 5000000000L;
-  private final double DOUBLE_CONST = 3.141592654;
-  private final String STRING_CONST = "hello world";
+    private final int INT_CONST = 789;
+    private final long LONG_CONST = 5000000000L;
+    private final double DOUBLE_CONST = 3.141592654;
+    private final String STRING_CONST = "hello world";
 
-  @Test
-  public void testGet() throws Exception {
-    HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
-    Assert.assertEquals(INT_CONST, ((Integer) r.get(0)).intValue());
-    Assert.assertEquals(LONG_CONST, ((Long) r.get(1)).longValue());
-    Assert.assertEquals(DOUBLE_CONST, ((Double) r.get(2)).doubleValue(), 0);
-    Assert.assertEquals(STRING_CONST, (String) r.get(3));
-  }
-
-  @Test
-  public void testGetWithName() throws Exception {
-    TypeInfo ti = getTypeInfo();
-    HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector(ti));
-    HCatSchema schema = HCatSchemaUtils.getHCatSchema(ti)
-                                          .get(0).getStructSubSchema();
-    Assert.assertEquals(INT_CONST, ((Integer) r.get("an_int", schema)).intValue());
-    Assert.assertEquals(LONG_CONST, ((Long) r.get("a_long", schema)).longValue());
-    Assert.assertEquals(DOUBLE_CONST, ((Double) r.get("a_double", schema)).doubleValue(), 0);
-    Assert.assertEquals(STRING_CONST, (String) r.get("a_string", schema));
-  }
-
-  @Test
-  public void testGetAll() throws Exception {
-    HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
-    List<Object> list = r.getAll();
-    Assert.assertEquals(INT_CONST, ((Integer) list.get(0)).intValue());
-    Assert.assertEquals(LONG_CONST, ((Long) list.get(1)).longValue());
-    Assert.assertEquals(DOUBLE_CONST, ((Double) list.get(2)).doubleValue(), 0);
-    Assert.assertEquals(STRING_CONST, (String) list.get(3));
-  }
-
-  @Test
-  public void testSet() throws Exception {
-    HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
-    boolean sawException = false;
-    try {
-      r.set(3, "Mary had a little lamb");
-    } catch (UnsupportedOperationException uoe) {
-      sawException = true;
+    @Test
+    public void testGet() throws Exception {
+        HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
+        Assert.assertEquals(INT_CONST, ((Integer) r.get(0)).intValue());
+        Assert.assertEquals(LONG_CONST, ((Long) r.get(1)).longValue());
+        Assert.assertEquals(DOUBLE_CONST, ((Double) r.get(2)).doubleValue(), 0);
+        Assert.assertEquals(STRING_CONST, (String) r.get(3));
     }
-    Assert.assertTrue(sawException);
-  }
 
-  @Test
-  public void testSize() throws Exception {
-    HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
-    Assert.assertEquals(4, r.size());
-  }
-
-  @Test
-  public void testReadFields() throws Exception {
-    HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
-    boolean sawException = false;
-    try {
-      r.readFields(null);
-    } catch (UnsupportedOperationException uoe) {
-      sawException = true;
+    @Test
+    public void testGetWithName() throws Exception {
+        TypeInfo ti = getTypeInfo();
+        HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector(ti));
+        HCatSchema schema = HCatSchemaUtils.getHCatSchema(ti)
+                .get(0).getStructSubSchema();
+        Assert.assertEquals(INT_CONST, ((Integer) r.get("an_int", schema)).intValue());
+        Assert.assertEquals(LONG_CONST, ((Long) r.get("a_long", schema)).longValue());
+        Assert.assertEquals(DOUBLE_CONST, ((Double) r.get("a_double", schema)).doubleValue(), 0);
+        Assert.assertEquals(STRING_CONST, (String) r.get("a_string", schema));
     }
-    Assert.assertTrue(sawException);
-  }
 
-  @Test
-  public void testWrite() throws Exception {
-    HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
-    boolean sawException = false;
-    try {
-      r.write(null);
-    } catch (UnsupportedOperationException uoe) {
-      sawException = true;
+    @Test
+    public void testGetAll() throws Exception {
+        HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
+        List<Object> list = r.getAll();
+        Assert.assertEquals(INT_CONST, ((Integer) list.get(0)).intValue());
+        Assert.assertEquals(LONG_CONST, ((Long) list.get(1)).longValue());
+        Assert.assertEquals(DOUBLE_CONST, ((Double) list.get(2)).doubleValue(), 0);
+        Assert.assertEquals(STRING_CONST, (String) list.get(3));
     }
-    Assert.assertTrue(sawException);
-  }
 
-  @Test
-  public void testSetWithName() throws Exception {
-    HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
-    boolean sawException = false;
-    try {
-      r.set("fred", null, "bob");
-    } catch (UnsupportedOperationException uoe) {
-      sawException = true;
+    @Test
+    public void testSet() throws Exception {
+        HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
+        boolean sawException = false;
+        try {
+            r.set(3, "Mary had a little lamb");
+        } catch (UnsupportedOperationException uoe) {
+            sawException = true;
+        }
+        Assert.assertTrue(sawException);
     }
-    Assert.assertTrue(sawException);
-  }
 
-  @Test
-  public void testRemove() throws Exception {
-    HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
-    boolean sawException = false;
-    try {
-      r.remove(0);
-    } catch (UnsupportedOperationException uoe) {
-      sawException = true;
+    @Test
+    public void testSize() throws Exception {
+        HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
+        Assert.assertEquals(4, r.size());
     }
-    Assert.assertTrue(sawException);
-  }
 
-  @Test
-  public void testCopy() throws Exception {
-    HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
-    boolean sawException = false;
-    try {
-      r.copy(null);
-    } catch (UnsupportedOperationException uoe) {
-      sawException = true;
+    @Test
+    public void testReadFields() throws Exception {
+        HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
+        boolean sawException = false;
+        try {
+            r.readFields(null);
+        } catch (UnsupportedOperationException uoe) {
+            sawException = true;
+        }
+        Assert.assertTrue(sawException);
     }
-    Assert.assertTrue(sawException);
-  }
 
-  @Test
-  public void testGetWritable() throws Exception {
-    HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector()).getWritable();
-    Assert.assertEquals(INT_CONST, ((Integer) r.get(0)).intValue());
-    Assert.assertEquals(LONG_CONST, ((Long) r.get(1)).longValue());
-    Assert.assertEquals(DOUBLE_CONST, ((Double) r.get(2)).doubleValue(), 0);
-    Assert.assertEquals(STRING_CONST, (String) r.get(3));
-    Assert.assertEquals("org.apache.hcatalog.data.DefaultHCatRecord", r.getClass().getName());
-  }
+    @Test
+    public void testWrite() throws Exception {
+        HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
+        boolean sawException = false;
+        try {
+            r.write(null);
+        } catch (UnsupportedOperationException uoe) {
+            sawException = true;
+        }
+        Assert.assertTrue(sawException);
+    }
 
-  private HCatRecord getHCatRecord() throws Exception {
+    @Test
+    public void testSetWithName() throws Exception {
+        HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
+        boolean sawException = false;
+        try {
+            r.set("fred", null, "bob");
+        } catch (UnsupportedOperationException uoe) {
+            sawException = true;
+        }
+        Assert.assertTrue(sawException);
+    }
 
-    List<Object> rec_1 = new ArrayList<Object>(4);
-    rec_1.add( new Integer(INT_CONST));
-    rec_1.add( new Long(LONG_CONST));
-    rec_1.add( new Double(DOUBLE_CONST));
-    rec_1.add( new String(STRING_CONST));
+    @Test
+    public void testRemove() throws Exception {
+        HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
+        boolean sawException = false;
+        try {
+            r.remove(0);
+        } catch (UnsupportedOperationException uoe) {
+            sawException = true;
+        }
+        Assert.assertTrue(sawException);
+    }
 
-    return new DefaultHCatRecord(rec_1);
-  }
+    @Test
+    public void testCopy() throws Exception {
+        HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
+        boolean sawException = false;
+        try {
+            r.copy(null);
+        } catch (UnsupportedOperationException uoe) {
+            sawException = true;
+        }
+        Assert.assertTrue(sawException);
+    }
 
-  private TypeInfo getTypeInfo() throws Exception {
-    List<String> names = new ArrayList<String>(4);
-    names.add("an_int");
-    names.add("a_long");
-    names.add("a_double");
-    names.add("a_string");
+    @Test
+    public void testGetWritable() throws Exception {
+        HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector()).getWritable();
+        Assert.assertEquals(INT_CONST, ((Integer) r.get(0)).intValue());
+        Assert.assertEquals(LONG_CONST, ((Long) r.get(1)).longValue());
+        Assert.assertEquals(DOUBLE_CONST, ((Double) r.get(2)).doubleValue(), 0);
+        Assert.assertEquals(STRING_CONST, (String) r.get(3));
+        Assert.assertEquals("org.apache.hcatalog.data.DefaultHCatRecord", r.getClass().getName());
+    }
 
-    List<TypeInfo> tis = new ArrayList<TypeInfo>(4);
-    tis.add(TypeInfoFactory.getPrimitiveTypeInfo("int"));
-    tis.add(TypeInfoFactory.getPrimitiveTypeInfo("bigint"));
-    tis.add(TypeInfoFactory.getPrimitiveTypeInfo("double"));
-    tis.add(TypeInfoFactory.getPrimitiveTypeInfo("string"));
+    private HCatRecord getHCatRecord() throws Exception {
+        List<Object> rec_1 = new ArrayList<Object>(4);
+        rec_1.add(new Integer(INT_CONST));
+        rec_1.add(new Long(LONG_CONST));
+        rec_1.add(new Double(DOUBLE_CONST));
+        rec_1.add(new String(STRING_CONST));
 
-    return TypeInfoFactory.getStructTypeInfo(names, tis);
+        return new DefaultHCatRecord(rec_1);
+    }
 
-  }
+    private TypeInfo getTypeInfo() throws Exception {
+        List<String> names = new ArrayList<String>(4);
+        names.add("an_int");
+        names.add("a_long");
+        names.add("a_double");
+        names.add("a_string");
 
-  private ObjectInspector getObjectInspector(TypeInfo ti) throws Exception {
-    return HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector(
-        (StructTypeInfo)ti);
-  }
+        List<TypeInfo> tis = new ArrayList<TypeInfo>(4);
+        tis.add(TypeInfoFactory.getPrimitiveTypeInfo("int"));
+        tis.add(TypeInfoFactory.getPrimitiveTypeInfo("bigint"));
+        tis.add(TypeInfoFactory.getPrimitiveTypeInfo("double"));
+        tis.add(TypeInfoFactory.getPrimitiveTypeInfo("string"));
 
-  private ObjectInspector getObjectInspector() throws Exception {
-    return HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector(
-          (StructTypeInfo)getTypeInfo());
-  }
+        return TypeInfoFactory.getStructTypeInfo(names, tis);
+    }
+
+    private ObjectInspector getObjectInspector(TypeInfo ti) throws Exception {
+        return HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector((StructTypeInfo)ti);
+    }
+
+    private ObjectInspector getObjectInspector() throws Exception {
+        return HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector(
+            (StructTypeInfo)getTypeInfo());
+    }
 }
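
The TestLazyHCatRecord cases above all follow one pattern: build a struct TypeInfo, derive an HCatRecord object inspector from it, and wrap an existing record; gets then delegate to the wrapped record while every mutator throws UnsupportedOperationException. A condensed sketch of that pattern, using only calls that appear in the test (the org.apache.hcatalog.data import paths are assumed from this test's package, so treat it as illustrative rather than canonical):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
    import org.apache.hcatalog.data.DefaultHCatRecord;
    import org.apache.hcatalog.data.HCatRecord;
    import org.apache.hcatalog.data.HCatRecordObjectInspectorFactory;
    import org.apache.hcatalog.data.LazyHCatRecord;

    public class LazyRecordSketch {
        public static void main(String[] args) throws Exception {
            // Struct type info -> object inspector, as getTypeInfo()/getObjectInspector() do above.
            List<String> names = new ArrayList<String>();
            names.add("an_int");
            names.add("a_string");
            List<TypeInfo> types = new ArrayList<TypeInfo>();
            types.add(TypeInfoFactory.getPrimitiveTypeInfo("int"));
            types.add(TypeInfoFactory.getPrimitiveTypeInfo("string"));
            ObjectInspector oi = HCatRecordObjectInspectorFactory.getHCatRecordObjectInspector(
                    (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(names, types));

            List<Object> values = new ArrayList<Object>();
            values.add(789);
            values.add("hello world");
            HCatRecord r = new LazyHCatRecord(new DefaultHCatRecord(values), oi);

            System.out.println(r.get(0));   // reads are supported: 789
            try {
                r.set(0, 790);              // writes are not
            } catch (UnsupportedOperationException expected) {
                System.out.println("LazyHCatRecord is read-only, as the tests assert");
            }
        }
    }
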
diff --git a/src/test/org/apache/hcatalog/data/TestReaderWriter.java b/src/test/org/apache/hcatalog/data/TestReaderWriter.java
index a682855..18d368f 100644
--- a/src/test/org/apache/hcatalog/data/TestReaderWriter.java
+++ b/src/test/org/apache/hcatalog/data/TestReaderWriter.java
@@ -49,140 +49,136 @@
 
 public class TestReaderWriter extends HCatBaseTest {
 
-  @Test
-  public void test() throws MetaException, CommandNeedRetryException,
-      IOException, ClassNotFoundException {
+    @Test
+    public void test() throws MetaException, CommandNeedRetryException,
+            IOException, ClassNotFoundException {
 
-    driver.run("drop table mytbl");
-    driver.run("create table mytbl (a string, b int)");
-    Iterator<Entry<String, String>> itr = hiveConf.iterator();
-    Map<String, String> map = new HashMap<String, String>();
-    while (itr.hasNext()) {
-      Entry<String, String> kv = itr.next();
-      map.put(kv.getKey(), kv.getValue());
+        driver.run("drop table mytbl");
+        driver.run("create table mytbl (a string, b int)");
+        Iterator<Entry<String, String>> itr = hiveConf.iterator();
+        Map<String, String> map = new HashMap<String, String>();
+        while (itr.hasNext()) {
+            Entry<String, String> kv = itr.next();
+            map.put(kv.getKey(), kv.getValue());
+        }
+
+        WriterContext cntxt = runsInMaster(map);
+
+        File writeCntxtFile = File.createTempFile("hcat-write", "temp");
+        writeCntxtFile.deleteOnExit();
+
+        // Serialize context.
+        ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(writeCntxtFile));
+        oos.writeObject(cntxt);
+        oos.flush();
+        oos.close();
+
+        // Now, deserialize it.
+        ObjectInputStream ois = new ObjectInputStream(new FileInputStream(writeCntxtFile));
+        cntxt = (WriterContext) ois.readObject();
+        ois.close();
+
+        runsInSlave(cntxt);
+        commit(map, true, cntxt);
+
+        ReaderContext readCntxt = runsInMaster(map, false);
+
+        File readCntxtFile = File.createTempFile("hcat-read", "temp");
+        readCntxtFile.deleteOnExit();
+        oos = new ObjectOutputStream(new FileOutputStream(readCntxtFile));
+        oos.writeObject(readCntxt);
+        oos.flush();
+        oos.close();
+
+        ois = new ObjectInputStream(new FileInputStream(readCntxtFile));
+        readCntxt = (ReaderContext) ois.readObject();
+        ois.close();
+
+        for (InputSplit split : readCntxt.getSplits()) {
+            runsInSlave(split, readCntxt.getConf());
+        }
     }
 
-    WriterContext cntxt = runsInMaster(map);
+    private WriterContext runsInMaster(Map<String, String> config) throws HCatException {
 
-    File writeCntxtFile = File.createTempFile("hcat-write", "temp");
-    writeCntxtFile.deleteOnExit();
-
-    // Serialize context.
-    ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(
-        writeCntxtFile));
-    oos.writeObject(cntxt);
-    oos.flush();
-    oos.close();
-
-    // Now, deserialize it.
-    ObjectInputStream ois = new ObjectInputStream(new FileInputStream(
-        writeCntxtFile));
-    cntxt = (WriterContext) ois.readObject();
-    ois.close();
-
-    runsInSlave(cntxt);
-    commit(map, true, cntxt);
-
-    ReaderContext readCntxt = runsInMaster(map, false);
-
-    File readCntxtFile = File.createTempFile("hcat-read", "temp");
-    readCntxtFile.deleteOnExit();
-    oos = new ObjectOutputStream(new FileOutputStream(readCntxtFile));
-    oos.writeObject(readCntxt);
-    oos.flush();
-    oos.close();
-
-    ois = new ObjectInputStream(new FileInputStream(readCntxtFile));
-    readCntxt = (ReaderContext) ois.readObject();
-    ois.close();
-
-    for (InputSplit split : readCntxt.getSplits()) {
-      runsInSlave(split, readCntxt.getConf());
-    }
-  }
-
-  private WriterContext runsInMaster(Map<String, String> config)
-      throws HCatException {
-
-    WriteEntity.Builder builder = new WriteEntity.Builder();
-    WriteEntity entity = builder.withTable("mytbl").build();
-    HCatWriter writer = DataTransferFactory.getHCatWriter(entity, config);
-    WriterContext info = writer.prepareWrite();
-    return info;
-  }
-
-  private ReaderContext runsInMaster(Map<String, String> config, boolean bogus)
-      throws HCatException {
-
-    ReadEntity.Builder builder = new ReadEntity.Builder();
-    ReadEntity entity = builder.withTable("mytbl").build();
-    HCatReader reader = DataTransferFactory.getHCatReader(entity, config);
-    ReaderContext cntxt = reader.prepareRead();
-    return cntxt;
-  }
-
-  private void runsInSlave(InputSplit split, Configuration config)
-      throws HCatException {
-
-    HCatReader reader = DataTransferFactory.getHCatReader(split, config);
-    Iterator<HCatRecord> itr = reader.read();
-    int i = 1;
-    while (itr.hasNext()) {
-      HCatRecord read = itr.next();
-      HCatRecord written = getRecord(i++);
-      // Argh, HCatRecord doesnt implement equals()
-      Assert.assertTrue("Read: " + read.get(0) + "Written: " + written.get(0),
-          written.get(0).equals(read.get(0)));
-      Assert.assertTrue("Read: " + read.get(1) + "Written: " + written.get(1),
-          written.get(1).equals(read.get(1)));
-      Assert.assertEquals(2, read.size());
-    }
-    //Assert.assertFalse(itr.hasNext());
-  }
-
-  private void runsInSlave(WriterContext context) throws HCatException {
-
-    HCatWriter writer = DataTransferFactory.getHCatWriter(context);
-    writer.write(new HCatRecordItr());
-  }
-
-  private void commit(Map<String, String> config, boolean status,
-      WriterContext context) throws IOException {
-
-    WriteEntity.Builder builder = new WriteEntity.Builder();
-    WriteEntity entity = builder.withTable("mytbl").build();
-    HCatWriter writer = DataTransferFactory.getHCatWriter(entity, config);
-    if (status) {
-      writer.commit(context);
-    } else {
-      writer.abort(context);
-    }
-  }
-
-  private static HCatRecord getRecord(int i) {
-    List<Object> list = new ArrayList<Object>(2);
-    list.add("Row #: " + i);
-    list.add(i);
-    return new DefaultHCatRecord(list);
-  }
-
-  private static class HCatRecordItr implements Iterator<HCatRecord> {
-
-    int i = 0;
-
-    @Override
-    public boolean hasNext() {
-      return i++ < 100 ? true : false;
+        WriteEntity.Builder builder = new WriteEntity.Builder();
+        WriteEntity entity = builder.withTable("mytbl").build();
+        HCatWriter writer = DataTransferFactory.getHCatWriter(entity, config);
+        WriterContext info = writer.prepareWrite();
+        return info;
     }
 
-    @Override
-    public HCatRecord next() {
-      return getRecord(i);
+    private ReaderContext runsInMaster(Map<String, String> config, boolean bogus)
+        throws HCatException {
+
+        ReadEntity.Builder builder = new ReadEntity.Builder();
+        ReadEntity entity = builder.withTable("mytbl").build();
+        HCatReader reader = DataTransferFactory.getHCatReader(entity, config);
+        ReaderContext cntxt = reader.prepareRead();
+        return cntxt;
     }
 
-    @Override
-    public void remove() {
-      throw new RuntimeException();
+    private void runsInSlave(InputSplit split, Configuration config) throws HCatException {
+
+        HCatReader reader = DataTransferFactory.getHCatReader(split, config);
+        Iterator<HCatRecord> itr = reader.read();
+        int i = 1;
+        while (itr.hasNext()) {
+            HCatRecord read = itr.next();
+            HCatRecord written = getRecord(i++);
+            // Argh, HCatRecord doesn't implement equals()
+            Assert.assertTrue("Read: " + read.get(0) + ", Written: " + written.get(0),
+                written.get(0).equals(read.get(0)));
+            Assert.assertTrue("Read: " + read.get(1) + ", Written: " + written.get(1),
+                written.get(1).equals(read.get(1)));
+            Assert.assertEquals(2, read.size());
+        }
+        //Assert.assertFalse(itr.hasNext());
     }
-  }
+
+    private void runsInSlave(WriterContext context) throws HCatException {
+
+        HCatWriter writer = DataTransferFactory.getHCatWriter(context);
+        writer.write(new HCatRecordItr());
+    }
+
+    private void commit(Map<String, String> config, boolean status,
+            WriterContext context) throws IOException {
+
+        WriteEntity.Builder builder = new WriteEntity.Builder();
+        WriteEntity entity = builder.withTable("mytbl").build();
+        HCatWriter writer = DataTransferFactory.getHCatWriter(entity, config);
+        if (status) {
+            writer.commit(context);
+        } else {
+            writer.abort(context);
+        }
+    }
+
+    private static HCatRecord getRecord(int i) {
+        List<Object> list = new ArrayList<Object>(2);
+        list.add("Row #: " + i);
+        list.add(i);
+        return new DefaultHCatRecord(list);
+    }
+
+    private static class HCatRecordItr implements Iterator<HCatRecord> {
+
+        int i = 0;
+
+        @Override
+        public boolean hasNext() {
+            return i++ < 100;
+        }
+
+        @Override
+        public HCatRecord next() {
+            return getRecord(i);
+        }
+
+        @Override
+        public void remove() {
+            throw new RuntimeException();
+        }
+    }
 }
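
TestReaderWriter spells out the intended master/slave protocol of the data-transfer API: the master prepares a serializable context, ships it to the slaves, the slaves do the actual I/O, and the master commits. Below is a condensed sketch of the write half, using only calls that appear in the test; the table name is the same "mytbl" placeholder, and the org.apache.hcatalog.data.transfer import paths are assumed since the test's import block is not part of this hunk:

    import java.util.Iterator;
    import java.util.Map;
    import org.apache.hcatalog.data.HCatRecord;
    import org.apache.hcatalog.data.transfer.DataTransferFactory;
    import org.apache.hcatalog.data.transfer.HCatWriter;
    import org.apache.hcatalog.data.transfer.WriteEntity;
    import org.apache.hcatalog.data.transfer.WriterContext;

    public class WritePathSketch {
        // Master: describe the target table and hand the resulting context to the slaves.
        static WriterContext prepareOnMaster(Map<String, String> config) throws Exception {
            WriteEntity entity = new WriteEntity.Builder().withTable("mytbl").build();
            HCatWriter writer = DataTransferFactory.getHCatWriter(entity, config);
            return writer.prepareWrite(); // serializable, so it can be shipped to slaves
        }

        // Slave: rebuild a writer from the context alone and push records through it.
        static void writeOnSlave(WriterContext context, Iterator<HCatRecord> records) throws Exception {
            DataTransferFactory.getHCatWriter(context).write(records);
        }

        // Master again: commit (or abort) with the same context once the slaves finish.
        static void commitOnMaster(Map<String, String> config, WriterContext context) throws Exception {
            WriteEntity entity = new WriteEntity.Builder().withTable("mytbl").build();
            DataTransferFactory.getHCatWriter(entity, config).commit(context);
        }
    }

The read half is symmetric: prepareRead() yields a ReaderContext whose getSplits() are fanned out, and each slave calls DataTransferFactory.getHCatReader(split, conf).read() to iterate the records.
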
diff --git a/src/test/org/apache/hcatalog/data/schema/TestHCatSchema.java b/src/test/org/apache/hcatalog/data/schema/TestHCatSchema.java
index 8ed2dab..9402b4d 100644
--- a/src/test/org/apache/hcatalog/data/schema/TestHCatSchema.java
+++ b/src/test/org/apache/hcatalog/data/schema/TestHCatSchema.java
@@ -24,55 +24,55 @@
 import java.util.List;
 
 public class TestHCatSchema extends TestCase {
-  public void testCannotAddFieldMoreThanOnce() throws HCatException {
-    List<HCatFieldSchema> fieldSchemaList = new ArrayList<HCatFieldSchema>();
-    fieldSchemaList.add(new HCatFieldSchema("name", HCatFieldSchema.Type.STRING, "What's your handle?"));
-    fieldSchemaList.add(new HCatFieldSchema("age", HCatFieldSchema.Type.INT, "So very old"));
+    public void testCannotAddFieldMoreThanOnce() throws HCatException {
+        List<HCatFieldSchema> fieldSchemaList = new ArrayList<HCatFieldSchema>();
+        fieldSchemaList.add(new HCatFieldSchema("name", HCatFieldSchema.Type.STRING, "What's your handle?"));
+        fieldSchemaList.add(new HCatFieldSchema("age", HCatFieldSchema.Type.INT, "So very old"));
 
-    HCatSchema schema = new HCatSchema(fieldSchemaList);
+        HCatSchema schema = new HCatSchema(fieldSchemaList);
 
-    assertTrue(schema.getFieldNames().contains("age"));
-    assertEquals(2, schema.getFields().size());
+        assertTrue(schema.getFieldNames().contains("age"));
+        assertEquals(2, schema.getFields().size());
 
-    try {
-      schema.append(new HCatFieldSchema("age", HCatFieldSchema.Type.INT, "So very old"));
-      fail("Was able to append field schema with same name");
-    } catch(HCatException he) {
-      assertTrue(he.getMessage().contains("Attempt to append HCatFieldSchema with already existing name: age."));
+        try {
+            schema.append(new HCatFieldSchema("age", HCatFieldSchema.Type.INT, "So very old"));
+            fail("Was able to append field schema with same name");
+        } catch (HCatException he) {
+            assertTrue(he.getMessage().contains("Attempt to append HCatFieldSchema with already existing name: age."));
+        }
+
+        assertTrue(schema.getFieldNames().contains("age"));
+        assertEquals(2, schema.getFields().size());
+
+        // Should also not be able to add fields of different types with the same name
+        try {
+            schema.append(new HCatFieldSchema("age", HCatFieldSchema.Type.STRING, "Maybe spelled out?"));
+            fail("Was able to append field schema with same name");
+        } catch (HCatException he) {
+            assertTrue(he.getMessage().contains("Attempt to append HCatFieldSchema with already existing name: age."));
+        }
+
+        assertTrue(schema.getFieldNames().contains("age"));
+        assertEquals(2, schema.getFields().size());
     }
 
-    assertTrue(schema.getFieldNames().contains("age"));
-    assertEquals(2, schema.getFields().size());
+    public void testCannotInstantiateSchemaWithRepeatedFieldNames() throws HCatException {
+        List<HCatFieldSchema> fieldSchemaList = new ArrayList<HCatFieldSchema>();
 
-    // Should also not be able to add fields of different types with same name
-    try {
-      schema.append(new HCatFieldSchema("age", HCatFieldSchema.Type.STRING, "Maybe spelled out?"));
-      fail("Was able to append field schema with same name");
-    } catch(HCatException he) {
-      assertTrue(he.getMessage().contains("Attempt to append HCatFieldSchema with already existing name: age."));
+        fieldSchemaList.add(new HCatFieldSchema("memberID", HCatFieldSchema.Type.INT, "as a number"));
+        fieldSchemaList.add(new HCatFieldSchema("location", HCatFieldSchema.Type.STRING, "there's Waldo"));
+
+        // No duplicate names.  This should be ok
+        HCatSchema schema = new HCatSchema(fieldSchemaList);
+
+        fieldSchemaList.add(new HCatFieldSchema("memberID", HCatFieldSchema.Type.STRING, "as a String"));
+
+        // Now a duplicated field name.  Should fail
+        try {
+            HCatSchema schema2 = new HCatSchema(fieldSchemaList);
+            fail("Able to add duplicate field name");
+        } catch (IllegalArgumentException iae) {
+            assertTrue(iae.getMessage().contains("Field named memberID already exists"));
+        }
     }
-
-    assertTrue(schema.getFieldNames().contains("age"));
-    assertEquals(2, schema.getFields().size());
-  }
-
-  public void testCannotInstantiateSchemaWithRepeatedFieldNames() throws HCatException {
-      List<HCatFieldSchema> fieldSchemaList = new ArrayList<HCatFieldSchema>();
-
-      fieldSchemaList.add(new HCatFieldSchema("memberID", HCatFieldSchema.Type.INT, "as a number"));
-      fieldSchemaList.add(new HCatFieldSchema("location", HCatFieldSchema.Type.STRING, "there's Waldo"));
-
-      // No duplicate names.  This should be ok
-      HCatSchema schema = new HCatSchema(fieldSchemaList);
-
-      fieldSchemaList.add(new HCatFieldSchema("memberID", HCatFieldSchema.Type.STRING, "as a String"));
-
-      // Now a duplicated field name.  Should fail
-      try {
-        HCatSchema schema2 = new HCatSchema(fieldSchemaList);
-        fail("Able to add duplicate field name");
-      } catch (IllegalArgumentException iae) {
-        assertTrue(iae.getMessage().contains("Field named memberID already exists"));
-      }
-  }
 }
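
TestHCatSchema pins down one invariant worth stating explicitly: duplicate field names are rejected both at construction time (IllegalArgumentException) and on append (HCatException), regardless of the field's type. A minimal illustration mirroring the calls in the test; the HCatException import path is assumed:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hcatalog.common.HCatException;
    import org.apache.hcatalog.data.schema.HCatFieldSchema;
    import org.apache.hcatalog.data.schema.HCatSchema;

    public class SchemaDuplicateSketch {
        public static void main(String[] args) throws HCatException {
            List<HCatFieldSchema> fields = new ArrayList<HCatFieldSchema>();
            fields.add(new HCatFieldSchema("age", HCatFieldSchema.Type.INT, "in years"));
            HCatSchema schema = new HCatSchema(fields);
            try {
                // Same name, different type: still rejected.
                schema.append(new HCatFieldSchema("age", HCatFieldSchema.Type.STRING, "spelled out"));
            } catch (HCatException he) {
                System.out.println(he.getMessage()); // "... already existing name: age."
            }
        }
    }
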
diff --git a/src/test/org/apache/hcatalog/data/schema/TestHCatSchemaUtils.java b/src/test/org/apache/hcatalog/data/schema/TestHCatSchemaUtils.java
index 19272ec..1d07621 100644
--- a/src/test/org/apache/hcatalog/data/schema/TestHCatSchemaUtils.java
+++ b/src/test/org/apache/hcatalog/data/schema/TestHCatSchemaUtils.java
@@ -31,47 +31,49 @@
 public class TestHCatSchemaUtils extends TestCase {
 
     private static final Logger LOG = LoggerFactory.getLogger(TestHCatSchemaUtils.class);
-    public void testSimpleOperation() throws Exception{
+
+    public void testSimpleOperation() throws Exception {
         String typeString = "struct<name:string,studentid:int,"
-            + "contact:struct<phno:string,email:string>,"
-            + "currently_registered_courses:array<string>,"
-            + "current_grades:map<string,string>,"
-            + "phnos:array<struct<phno:string,type:string>>,blah:array<int>>";
+                + "contact:struct<phno:string,email:string>,"
+                + "currently_registered_courses:array<string>,"
+                + "current_grades:map<string,string>,"
+                + "phnos:array<struct<phno:string,type:string>>,blah:array<int>>";
 
         TypeInfo ti = TypeInfoUtils.getTypeInfoFromTypeString(typeString);
 
         HCatSchema hsch = HCatSchemaUtils.getHCatSchemaFromTypeString(typeString);
-        LOG.info("Type name : {}",ti.getTypeName());
-        LOG.info("HCatSchema : {}",hsch);
+        LOG.info("Type name : {}", ti.getTypeName());
+        LOG.info("HCatSchema : {}", hsch);
         assertEquals(hsch.size(), 1);
-        assertEquals(ti.getTypeName(),hsch.get(0).getTypeString());
-        assertEquals(hsch.get(0).getTypeString(),typeString);
+        assertEquals(ti.getTypeName(), hsch.get(0).getTypeString());
+        assertEquals(hsch.get(0).getTypeString(), typeString);
     }
 
     @SuppressWarnings("unused")
     private void pretty_print(PrintStream pout, HCatSchema hsch) throws HCatException {
-        pretty_print(pout,hsch,"");
+        pretty_print(pout, hsch, "");
     }
 
 
     private void pretty_print(PrintStream pout, HCatSchema hsch, String prefix) throws HCatException {
         int i = 0;
-        for (HCatFieldSchema field : hsch.getFields()){
-            pretty_print(pout,field,prefix+"."+(field.getName()==null?i:field.getName()));
+        for (HCatFieldSchema field : hsch.getFields()) {
+            pretty_print(pout, field, prefix + "." + (field.getName() == null ? i : field.getName()));
             i++;
         }
     }
+
     private void pretty_print(PrintStream pout, HCatFieldSchema hfsch, String prefix) throws HCatException {
 
         Category tcat = hfsch.getCategory();
-        if (Category.STRUCT == tcat){
-            pretty_print(pout,hfsch.getStructSubSchema(),prefix);
-        }else if (Category.ARRAY == tcat){
-            pretty_print(pout,hfsch.getArrayElementSchema(),prefix);
-        }else if (Category.MAP == tcat){
+        if (Category.STRUCT == tcat) {
+            pretty_print(pout, hfsch.getStructSubSchema(), prefix);
+        } else if (Category.ARRAY == tcat) {
+            pretty_print(pout, hfsch.getArrayElementSchema(), prefix);
+        } else if (Category.MAP == tcat) {
             pout.println(prefix + ".mapkey:\t" + hfsch.getMapKeyType().toString());
-            pretty_print(pout,hfsch.getMapValueSchema(),prefix+".mapvalue:");
-        }else{
+            pretty_print(pout, hfsch.getMapValueSchema(), prefix + ".mapvalue:");
+        } else {
             pout.println(prefix + "\t" + hfsch.getType().toString());
         }
     }
diff --git a/src/test/org/apache/hcatalog/listener/TestMsgBusConnection.java b/src/test/org/apache/hcatalog/listener/TestMsgBusConnection.java
index 4da3cef..0db2b16 100644
--- a/src/test/org/apache/hcatalog/listener/TestMsgBusConnection.java
+++ b/src/test/org/apache/hcatalog/listener/TestMsgBusConnection.java
@@ -43,80 +43,80 @@
 
 public class TestMsgBusConnection extends TestCase {
 
-  private Driver driver;
-  private BrokerService broker;
-  private MessageConsumer consumer;
+    private Driver driver;
+    private BrokerService broker;
+    private MessageConsumer consumer;
 
-  @Override
-  protected void setUp() throws Exception {
+    @Override
+    protected void setUp() throws Exception {
 
-    super.setUp();
-    broker = new BrokerService();
-    // configure the broker
-    broker.addConnector("tcp://localhost:61616?broker.persistent=false");
+        super.setUp();
+        broker = new BrokerService();
+        // configure the broker
+        broker.addConnector("tcp://localhost:61616?broker.persistent=false");
 
-    broker.start();
+        broker.start();
 
-    System.setProperty("java.naming.factory.initial",
-        "org.apache.activemq.jndi.ActiveMQInitialContextFactory");
-    System.setProperty("java.naming.provider.url", "tcp://localhost:61616");
-    connectClient();
-    HiveConf hiveConf = new HiveConf(this.getClass());
-    hiveConf.set(ConfVars.METASTORE_EVENT_LISTENERS.varname,
-        NotificationListener.class.getName());
-    hiveConf.set("hive.metastore.local", "true");
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    hiveConf.set(HCatConstants.HCAT_MSGBUS_TOPIC_PREFIX, "planetlab.hcat");
-    SessionState.start(new CliSessionState(hiveConf));
-    driver = new Driver(hiveConf);
-  }
-
-  private void connectClient() throws JMSException {
-    ConnectionFactory connFac = new ActiveMQConnectionFactory(
-        "tcp://localhost:61616");
-    Connection conn = connFac.createConnection();
-    conn.start();
-    Session session = conn.createSession(true, Session.SESSION_TRANSACTED);
-    Destination hcatTopic = session.createTopic("planetlab.hcat");
-    consumer = session.createConsumer(hcatTopic);
-  }
-
-  public void testConnection() throws Exception {
-
-    try {
-      driver.run("create database testconndb");
-      Message msg = consumer.receive();
-      assertEquals(HCatConstants.HCAT_ADD_DATABASE_EVENT,
-          msg.getStringProperty(HCatConstants.HCAT_EVENT));
-      assertEquals("topic://planetlab.hcat", msg.getJMSDestination().toString());
-      assertEquals("testconndb",
-          ((Database) ((ObjectMessage) msg).getObject()).getName());
-      broker.stop();
-      driver.run("drop database testconndb cascade");
-      broker.start(true);
-      connectClient();
-      driver.run("create database testconndb");
-      msg = consumer.receive();
-      assertEquals(HCatConstants.HCAT_ADD_DATABASE_EVENT,
-          msg.getStringProperty(HCatConstants.HCAT_EVENT));
-      assertEquals("topic://planetlab.hcat", msg.getJMSDestination().toString());
-      assertEquals("testconndb",
-          ((Database) ((ObjectMessage) msg).getObject()).getName());
-      driver.run("drop database testconndb cascade");
-      msg = consumer.receive();
-      assertEquals(HCatConstants.HCAT_DROP_DATABASE_EVENT,
-          msg.getStringProperty(HCatConstants.HCAT_EVENT));
-      assertEquals("topic://planetlab.hcat", msg.getJMSDestination().toString());
-      assertEquals("testconndb",
-          ((Database) ((ObjectMessage) msg).getObject()).getName());
-    } catch (NoSuchObjectException nsoe) {
-      nsoe.printStackTrace(System.err);
-      assert false;
-    } catch (AlreadyExistsException aee) {
-      aee.printStackTrace(System.err);
-      assert false;
+        System.setProperty("java.naming.factory.initial",
+                "org.apache.activemq.jndi.ActiveMQInitialContextFactory");
+        System.setProperty("java.naming.provider.url", "tcp://localhost:61616");
+        connectClient();
+        HiveConf hiveConf = new HiveConf(this.getClass());
+        hiveConf.set(ConfVars.METASTORE_EVENT_LISTENERS.varname,
+                NotificationListener.class.getName());
+        hiveConf.set("hive.metastore.local", "true");
+        hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+        hiveConf.set(HCatConstants.HCAT_MSGBUS_TOPIC_PREFIX, "planetlab.hcat");
+        SessionState.start(new CliSessionState(hiveConf));
+        driver = new Driver(hiveConf);
     }
-  }
+
+    private void connectClient() throws JMSException {
+        ConnectionFactory connFac = new ActiveMQConnectionFactory(
+                "tcp://localhost:61616");
+        Connection conn = connFac.createConnection();
+        conn.start();
+        Session session = conn.createSession(true, Session.SESSION_TRANSACTED);
+        Destination hcatTopic = session.createTopic("planetlab.hcat");
+        consumer = session.createConsumer(hcatTopic);
+    }
+
+    public void testConnection() throws Exception {
+
+        try {
+            driver.run("create database testconndb");
+            Message msg = consumer.receive();
+            assertEquals(HCatConstants.HCAT_ADD_DATABASE_EVENT,
+                    msg.getStringProperty(HCatConstants.HCAT_EVENT));
+            assertEquals("topic://planetlab.hcat", msg.getJMSDestination().toString());
+            assertEquals("testconndb",
+                    ((Database) ((ObjectMessage) msg).getObject()).getName());
+            broker.stop();
+            driver.run("drop database testconndb cascade");
+            broker.start(true);
+            connectClient();
+            driver.run("create database testconndb");
+            msg = consumer.receive();
+            assertEquals(HCatConstants.HCAT_ADD_DATABASE_EVENT,
+                    msg.getStringProperty(HCatConstants.HCAT_EVENT));
+            assertEquals("topic://planetlab.hcat", msg.getJMSDestination().toString());
+            assertEquals("testconndb",
+                    ((Database) ((ObjectMessage) msg).getObject()).getName());
+            driver.run("drop database testconndb cascade");
+            msg = consumer.receive();
+            assertEquals(HCatConstants.HCAT_DROP_DATABASE_EVENT,
+                    msg.getStringProperty(HCatConstants.HCAT_EVENT));
+            assertEquals("topic://planetlab.hcat", msg.getJMSDestination().toString());
+            assertEquals("testconndb",
+                    ((Database) ((ObjectMessage) msg).getObject()).getName());
+        } catch (NoSuchObjectException nsoe) {
+            nsoe.printStackTrace(System.err);
+            assert false;
+        } catch (AlreadyExistsException aee) {
+            aee.printStackTrace(System.err);
+            assert false;
+        }
+    }
 }
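
TestMsgBusConnection doubles as a recipe for consuming HCatalog events off the message bus: connect to the broker, subscribe to the configured topic, and read the event type from the HCAT_EVENT string property of each message. A stripped-down synchronous sketch under the same assumptions the test makes (local tcp:// broker, "planetlab.hcat" topic prefix); the HCatConstants import path is assumed:

    import javax.jms.Connection;
    import javax.jms.ConnectionFactory;
    import javax.jms.Message;
    import javax.jms.MessageConsumer;
    import javax.jms.ObjectMessage;
    import javax.jms.Session;
    import org.apache.activemq.ActiveMQConnectionFactory;
    import org.apache.hcatalog.common.HCatConstants;

    public class HCatTopicConsumerSketch {
        public static void main(String[] args) throws Exception {
            ConnectionFactory factory = new ActiveMQConnectionFactory("tcp://localhost:61616");
            Connection conn = factory.createConnection();
            conn.start();
            Session session = conn.createSession(true, Session.SESSION_TRANSACTED);
            MessageConsumer consumer = session.createConsumer(session.createTopic("planetlab.hcat"));

            Message msg = consumer.receive();                               // blocks until an event arrives
            String event = msg.getStringProperty(HCatConstants.HCAT_EVENT); // e.g. HCAT_ADD_DATABASE_EVENT
            if (msg instanceof ObjectMessage) {
                // Database events carry the Thrift object (e.g. Database) as the payload.
                System.out.println(event + " -> " + ((ObjectMessage) msg).getObject());
            }
            conn.close();
        }
    }
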
diff --git a/src/test/org/apache/hcatalog/listener/TestNotificationListener.java b/src/test/org/apache/hcatalog/listener/TestNotificationListener.java
index 7d09d79..58ef72c 100644
--- a/src/test/org/apache/hcatalog/listener/TestNotificationListener.java
+++ b/src/test/org/apache/hcatalog/listener/TestNotificationListener.java
@@ -59,136 +59,136 @@
 import junit.framework.TestCase;
 
 public class TestNotificationListener extends TestCase implements
-    MessageListener {
+        MessageListener {
 
-  private HiveConf hiveConf;
-  private Driver driver;
-  private AtomicInteger cntInvocation = new AtomicInteger(0);
+    private HiveConf hiveConf;
+    private Driver driver;
+    private AtomicInteger cntInvocation = new AtomicInteger(0);
 
-  @Override
-  protected void setUp() throws Exception {
+    @Override
+    protected void setUp() throws Exception {
 
-    super.setUp();
-    System.setProperty("java.naming.factory.initial",
-        "org.apache.activemq.jndi.ActiveMQInitialContextFactory");
-    System.setProperty("java.naming.provider.url",
-        "vm://localhost?broker.persistent=false");
-    ConnectionFactory connFac = new ActiveMQConnectionFactory(
-        "vm://localhost?broker.persistent=false");
-    Connection conn = connFac.createConnection();
-    conn.start();
-    // We want message to be sent when session commits, thus we run in
-    // transacted mode.
-    Session session = conn.createSession(true, Session.SESSION_TRANSACTED);
-    Destination hcatTopic = session
-        .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX);
-    MessageConsumer consumer1 = session.createConsumer(hcatTopic);
-    consumer1.setMessageListener(this);
-    Destination tblTopic = session
-        .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".mydb.mytbl");
-    MessageConsumer consumer2 = session.createConsumer(tblTopic);
-    consumer2.setMessageListener(this);
-    Destination dbTopic = session
-        .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".mydb");
-    MessageConsumer consumer3 = session.createConsumer(dbTopic);
-    consumer3.setMessageListener(this);
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.set(ConfVars.METASTORE_EVENT_LISTENERS.varname,
-        NotificationListener.class.getName());
-    hiveConf.set("hive.metastore.local", "true");
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    driver = new Driver(hiveConf);
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    assertEquals(7, cntInvocation.get());
-    super.tearDown();
-  }
-
-  public void testAMQListener() throws MetaException, TException,
-      UnknownTableException, NoSuchObjectException, CommandNeedRetryException,
-      UnknownDBException, InvalidPartitionException, UnknownPartitionException {
-    driver.run("create database mydb");
-    driver.run("use mydb");
-    driver.run("create table mytbl (a string) partitioned by (b string)");
-    driver.run("alter table mytbl add partition(b='2011')");
-    HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
-    Map<String, String> kvs = new HashMap<String, String>(1);
-    kvs.put("b", "2011");
-    msc.markPartitionForEvent("mydb", "mytbl", kvs,
-        PartitionEventType.LOAD_DONE);
-    driver.run("alter table mytbl drop partition(b='2011')");
-    driver.run("drop table mytbl");
-    driver.run("drop database mydb");
-  }
-
-  @Override
-  public void onMessage(Message msg) {
-    cntInvocation.incrementAndGet();
-
-    String event;
-    try {
-      event = msg.getStringProperty(HCatConstants.HCAT_EVENT);
-      if (event.equals(HCatConstants.HCAT_ADD_DATABASE_EVENT)) {
-
-        assertEquals("topic://" + HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX, msg
-            .getJMSDestination().toString());
-        assertEquals("mydb",
-            ((Database) ((ObjectMessage) msg).getObject()).getName());
-      } else if (event.equals(HCatConstants.HCAT_ADD_TABLE_EVENT)) {
-
-        assertEquals("topic://hcat.mydb", msg.getJMSDestination().toString());
-        Table tbl = (Table) (((ObjectMessage) msg).getObject());
-        assertEquals("mytbl", tbl.getTableName());
-        assertEquals("mydb", tbl.getDbName());
-        assertEquals(1, tbl.getPartitionKeysSize());
-      } else if (event.equals(HCatConstants.HCAT_ADD_PARTITION_EVENT)) {
-
-        assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
-            .toString());
-        Partition part = (Partition) (((ObjectMessage) msg).getObject());
-        assertEquals("mytbl", part.getTableName());
-        assertEquals("mydb", part.getDbName());
-        List<String> vals = new ArrayList<String>(1);
-        vals.add("2011");
-        assertEquals(vals, part.getValues());
-      } else if (event.equals(HCatConstants.HCAT_DROP_PARTITION_EVENT)) {
-
-        assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
-            .toString());
-        Partition part = (Partition) (((ObjectMessage) msg).getObject());
-        assertEquals("mytbl", part.getTableName());
-        assertEquals("mydb", part.getDbName());
-        List<String> vals = new ArrayList<String>(1);
-        vals.add("2011");
-        assertEquals(vals, part.getValues());
-      } else if (event.equals(HCatConstants.HCAT_DROP_TABLE_EVENT)) {
-
-        assertEquals("topic://hcat.mydb", msg.getJMSDestination().toString());
-        Table tbl = (Table) (((ObjectMessage) msg).getObject());
-        assertEquals("mytbl", tbl.getTableName());
-        assertEquals("mydb", tbl.getDbName());
-        assertEquals(1, tbl.getPartitionKeysSize());
-      } else if (event.equals(HCatConstants.HCAT_DROP_DATABASE_EVENT)) {
-
-        assertEquals("topic://" + HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX, msg
-            .getJMSDestination().toString());
-        assertEquals("mydb",
-            ((Database) ((ObjectMessage) msg).getObject()).getName());
-      } else if (event.equals(HCatConstants.HCAT_PARTITION_DONE_EVENT)) {
-        assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
-            .toString());
-        MapMessage mapMsg = (MapMessage) msg;
-        assert mapMsg.getString("b").equals("2011");
-      } else
-        assert false;
-    } catch (JMSException e) {
-      e.printStackTrace(System.err);
-      assert false;
+        super.setUp();
+        System.setProperty("java.naming.factory.initial",
+                "org.apache.activemq.jndi.ActiveMQInitialContextFactory");
+        System.setProperty("java.naming.provider.url",
+                "vm://localhost?broker.persistent=false");
+        ConnectionFactory connFac = new ActiveMQConnectionFactory(
+                "vm://localhost?broker.persistent=false");
+        Connection conn = connFac.createConnection();
+        conn.start();
+        // We want the message to be sent when the session commits, so we run
+        // in transacted mode.
+        Session session = conn.createSession(true, Session.SESSION_TRANSACTED);
+        Destination hcatTopic = session
+                .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX);
+        MessageConsumer consumer1 = session.createConsumer(hcatTopic);
+        consumer1.setMessageListener(this);
+        Destination tblTopic = session
+                .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".mydb.mytbl");
+        MessageConsumer consumer2 = session.createConsumer(tblTopic);
+        consumer2.setMessageListener(this);
+        Destination dbTopic = session
+                .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".mydb");
+        MessageConsumer consumer3 = session.createConsumer(dbTopic);
+        consumer3.setMessageListener(this);
+        hiveConf = new HiveConf(this.getClass());
+        hiveConf.set(ConfVars.METASTORE_EVENT_LISTENERS.varname,
+                NotificationListener.class.getName());
+        hiveConf.set("hive.metastore.local", "true");
+        hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+        SessionState.start(new CliSessionState(hiveConf));
+        driver = new Driver(hiveConf);
     }
-  }
+
+    @Override
+    protected void tearDown() throws Exception {
+        assertEquals(7, cntInvocation.get());
+        super.tearDown();
+    }
+
+    public void testAMQListener() throws MetaException, TException,
+            UnknownTableException, NoSuchObjectException, CommandNeedRetryException,
+            UnknownDBException, InvalidPartitionException, UnknownPartitionException {
+        driver.run("create database mydb");
+        driver.run("use mydb");
+        driver.run("create table mytbl (a string) partitioned by (b string)");
+        driver.run("alter table mytbl add partition(b='2011')");
+        HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
+        Map<String, String> kvs = new HashMap<String, String>(1);
+        kvs.put("b", "2011");
+        msc.markPartitionForEvent("mydb", "mytbl", kvs,
+                PartitionEventType.LOAD_DONE);
+        driver.run("alter table mytbl drop partition(b='2011')");
+        driver.run("drop table mytbl");
+        driver.run("drop database mydb");
+    }
+
+    @Override
+    public void onMessage(Message msg) {
+        cntInvocation.incrementAndGet();
+
+        String event;
+        try {
+            event = msg.getStringProperty(HCatConstants.HCAT_EVENT);
+            if (event.equals(HCatConstants.HCAT_ADD_DATABASE_EVENT)) {
+
+                assertEquals("topic://" + HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX, msg
+                        .getJMSDestination().toString());
+                assertEquals("mydb",
+                        ((Database) ((ObjectMessage) msg).getObject()).getName());
+            } else if (event.equals(HCatConstants.HCAT_ADD_TABLE_EVENT)) {
+
+                assertEquals("topic://hcat.mydb", msg.getJMSDestination().toString());
+                Table tbl = (Table) (((ObjectMessage) msg).getObject());
+                assertEquals("mytbl", tbl.getTableName());
+                assertEquals("mydb", tbl.getDbName());
+                assertEquals(1, tbl.getPartitionKeysSize());
+            } else if (event.equals(HCatConstants.HCAT_ADD_PARTITION_EVENT)) {
+
+                assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
+                        .toString());
+                Partition part = (Partition) (((ObjectMessage) msg).getObject());
+                assertEquals("mytbl", part.getTableName());
+                assertEquals("mydb", part.getDbName());
+                List<String> vals = new ArrayList<String>(1);
+                vals.add("2011");
+                assertEquals(vals, part.getValues());
+            } else if (event.equals(HCatConstants.HCAT_DROP_PARTITION_EVENT)) {
+
+                assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
+                        .toString());
+                Partition part = (Partition) (((ObjectMessage) msg).getObject());
+                assertEquals("mytbl", part.getTableName());
+                assertEquals("mydb", part.getDbName());
+                List<String> vals = new ArrayList<String>(1);
+                vals.add("2011");
+                assertEquals(vals, part.getValues());
+            } else if (event.equals(HCatConstants.HCAT_DROP_TABLE_EVENT)) {
+
+                assertEquals("topic://hcat.mydb", msg.getJMSDestination().toString());
+                Table tbl = (Table) (((ObjectMessage) msg).getObject());
+                assertEquals("mytbl", tbl.getTableName());
+                assertEquals("mydb", tbl.getDbName());
+                assertEquals(1, tbl.getPartitionKeysSize());
+            } else if (event.equals(HCatConstants.HCAT_DROP_DATABASE_EVENT)) {
+
+                assertEquals("topic://" + HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX, msg
+                        .getJMSDestination().toString());
+                assertEquals("mydb",
+                        ((Database) ((ObjectMessage) msg).getObject()).getName());
+            } else if (event.equals(HCatConstants.HCAT_PARTITION_DONE_EVENT)) {
+                assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
+                        .toString());
+                MapMessage mapMsg = (MapMessage) msg;
+                assert mapMsg.getString("b").equals("2011");
+            } else
+                assert false;
+        } catch (JMSException e) {
+            e.printStackTrace(System.err);
+            assert false;
+        }
+    }
 }
diff --git a/src/test/org/apache/hcatalog/mapreduce/HCatBaseTest.java b/src/test/org/apache/hcatalog/mapreduce/HCatBaseTest.java
index 7c36a82..4302e67 100644
--- a/src/test/org/apache/hcatalog/mapreduce/HCatBaseTest.java
+++ b/src/test/org/apache/hcatalog/mapreduce/HCatBaseTest.java
@@ -38,41 +38,41 @@
  * Simplify writing HCatalog tests that require a HiveMetaStore.
  */
 public class HCatBaseTest {
-  protected static final Logger LOG = LoggerFactory.getLogger(HCatBaseTest.class);
-  protected static final String TEST_DATA_DIR = System.getProperty("user.dir") +
-      "/build/test/data/" + HCatBaseTest.class.getCanonicalName();
-  protected static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+    protected static final Logger LOG = LoggerFactory.getLogger(HCatBaseTest.class);
+    protected static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+            "/build/test/data/" + HCatBaseTest.class.getCanonicalName();
+    protected static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
 
-  protected HiveConf hiveConf = null;
-  protected Driver driver = null;
-  protected HiveMetaStoreClient client = null;
+    protected HiveConf hiveConf = null;
+    protected Driver driver = null;
+    protected HiveMetaStoreClient client = null;
 
-  @BeforeClass
-  public static void setUpTestDataDir() throws Exception {
-    LOG.info("Using warehouse directory " + TEST_WAREHOUSE_DIR);
-    File f = new File(TEST_WAREHOUSE_DIR);
-    if (f.exists()) {
-      FileUtil.fullyDelete(f);
+    @BeforeClass
+    public static void setUpTestDataDir() throws Exception {
+        LOG.info("Using warehouse directory " + TEST_WAREHOUSE_DIR);
+        File f = new File(TEST_WAREHOUSE_DIR);
+        if (f.exists()) {
+            FileUtil.fullyDelete(f);
+        }
+        Assert.assertTrue(new File(TEST_WAREHOUSE_DIR).mkdirs());
     }
-    Assert.assertTrue(new File(TEST_WAREHOUSE_DIR).mkdirs());
-  }
 
-  @Before
-  public void setUp() throws Exception {
-    if (driver == null) {
-      hiveConf = new HiveConf(this.getClass());
-      hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-      hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-      hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-      hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
-      driver = new Driver(hiveConf);
-      client = new HiveMetaStoreClient(hiveConf);
-      SessionState.start(new CliSessionState(hiveConf));
+    @Before
+    public void setUp() throws Exception {
+        if (driver == null) {
+            hiveConf = new HiveConf(this.getClass());
+            hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+            hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+            hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+            hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
+            driver = new Driver(hiveConf);
+            client = new HiveMetaStoreClient(hiveConf);
+            SessionState.start(new CliSessionState(hiveConf));
+        }
     }
-  }
 
-  protected void logAndRegister(PigServer server, String query) throws IOException {
-    LOG.info("Registering pig query: " + query);
-    server.registerQuery(query);
-  }
+    protected void logAndRegister(PigServer server, String query) throws IOException {
+        LOG.info("Registering pig query: " + query);
+        server.registerQuery(query);
+    }
 }
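
As an illustration of what the reformatted base class gives its subclasses (this sketch is not part of the commit; the class name and assertions are hypothetical, and only the inherited hiveConf/driver/client fields and the JUnit 4 lifecycle come from HCatBaseTest above), a minimal test written against it in the same 4-space style might look like:

    package org.apache.hcatalog.mapreduce;

    import org.junit.Assert;
    import org.junit.Test;

    // Hypothetical example, not in this commit: exercises the fixtures HCatBaseTest provides.
    public class TestHCatBaseTestExample extends HCatBaseTest {

        @Test
        public void testFixturesAreInitialized() throws Exception {
            // HCatBaseTest.setUp() lazily creates the Driver and HiveMetaStoreClient on first use,
            // pointing them at the local warehouse directory prepared in setUpTestDataDir().
            Assert.assertEquals(0, driver.run("show tables").getResponseCode());
            Assert.assertNotNull(client.getDatabase("default"));
        }
    }
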
diff --git a/src/test/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java b/src/test/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java
index ce59fa7..3bb3fd3 100644
--- a/src/test/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java
+++ b/src/test/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java
@@ -68,284 +68,283 @@
  */
 public abstract class HCatMapReduceTest extends TestCase {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HCatMapReduceTest.class);
-  protected String dbName = "default";
-  protected String tableName = "testHCatMapReduceTable";
+    private static final Logger LOG = LoggerFactory.getLogger(HCatMapReduceTest.class);
+    protected String dbName = "default";
+    protected String tableName = "testHCatMapReduceTable";
 
-  protected String inputFormat = RCFileInputFormat.class.getName();
-  protected String outputFormat = RCFileOutputFormat.class.getName();
-  protected String serdeClass = ColumnarSerDe.class.getName();
+    protected String inputFormat = RCFileInputFormat.class.getName();
+    protected String outputFormat = RCFileOutputFormat.class.getName();
+    protected String serdeClass = ColumnarSerDe.class.getName();
 
-  private static List<HCatRecord> writeRecords = new ArrayList<HCatRecord>();
-  private static List<HCatRecord> readRecords = new ArrayList<HCatRecord>();
+    private static List<HCatRecord> writeRecords = new ArrayList<HCatRecord>();
+    private static List<HCatRecord> readRecords = new ArrayList<HCatRecord>();
 
-  protected abstract void initialize() throws Exception;
+    protected abstract void initialize() throws Exception;
 
-  protected abstract List<FieldSchema> getPartitionKeys();
+    protected abstract List<FieldSchema> getPartitionKeys();
 
-  protected abstract List<FieldSchema> getTableColumns();
+    protected abstract List<FieldSchema> getTableColumns();
 
-  private HiveMetaStoreClient client;
-  protected HiveConf hiveConf;
+    private HiveMetaStoreClient client;
+    protected HiveConf hiveConf;
 
-  private FileSystem fs;
-  private String thriftUri = null;
+    private FileSystem fs;
+    private String thriftUri = null;
 
-  protected Driver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-    hiveConf = new HiveConf(this.getClass());
-
-    //The default org.apache.hadoop.hive.ql.hooks.PreExecutePrinter hook
-    //is present only in the ql/test directory
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    driver = new Driver(hiveConf);
-    SessionState.start(new CliSessionState(hiveConf));
-
-    thriftUri = System.getenv("HCAT_METASTORE_URI");
-
-    if( thriftUri != null ) {
-      LOG.info("Using URI {}", thriftUri);
-
-      hiveConf.set("hive.metastore.local", "false");
-      hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUri);
-    }
-
-    fs = new LocalFileSystem();
-    fs.initialize(fs.getWorkingDirectory().toUri(), new Configuration());
-
-    initialize();
-
-    client = new HiveMetaStoreClient(hiveConf, null);
-    initTable();
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    try {
-      String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
-
-      client.dropTable(databaseName, tableName);
-    } catch(Exception e) {
-      e.printStackTrace();
-      throw e;
-    }
-
-    client.close();
-  }
-
-
-
-  private void initTable() throws Exception {
-
-    String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
-
-    try {
-      client.dropTable(databaseName, tableName);
-    } catch(Exception e) {
-    } //can fail with NoSuchObjectException
-
-
-    Table tbl = new Table();
-    tbl.setDbName(databaseName);
-    tbl.setTableName(tableName);
-    tbl.setTableType("MANAGED_TABLE");
-    StorageDescriptor sd = new StorageDescriptor();
-
-    sd.setCols(getTableColumns());
-    tbl.setPartitionKeys(getPartitionKeys());
-
-    tbl.setSd(sd);
-
-    sd.setBucketCols(new ArrayList<String>(2));
-    sd.setSerdeInfo(new SerDeInfo());
-    sd.getSerdeInfo().setName(tbl.getTableName());
-    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-    sd.getSerdeInfo().getParameters().put(
-        org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
-    sd.getSerdeInfo().setSerializationLib(serdeClass);
-    sd.setInputFormat(inputFormat);
-    sd.setOutputFormat(outputFormat);
-
-    Map<String, String> tableParams = new HashMap<String, String>();
-    tbl.setParameters(tableParams);
-
-    client.createTable(tbl);
-  }
-
-  //Create test input file with specified number of rows
-  private void createInputFile(Path path, int rowCount) throws IOException {
-
-    if( fs.exists(path) ) {
-      fs.delete(path, true);
-    }
-
-    FSDataOutputStream os = fs.create(path);
-
-    for(int i = 0;i < rowCount;i++) {
-      os.writeChars(i + "\n");
-    }
-
-    os.close();
-  }
-
-  public static class MapCreate extends
-  Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
-
-    static int writeCount = 0; //test will be in local mode
+    protected Driver driver;
 
     @Override
-    public void map(LongWritable key, Text value, Context context
-    ) throws IOException, InterruptedException {
-      {
-        try {
-          HCatRecord rec = writeRecords.get(writeCount);
-          context.write(null, rec);
-          writeCount++;
+    protected void setUp() throws Exception {
+        hiveConf = new HiveConf(this.getClass());
 
-        }catch(Exception e) {
+        //The default org.apache.hadoop.hive.ql.hooks.PreExecutePrinter hook
+        //is present only in the ql/test directory
+        hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+        driver = new Driver(hiveConf);
+        SessionState.start(new CliSessionState(hiveConf));
 
-          e.printStackTrace(System.err); //print since otherwise exception is lost
-          throw new IOException(e);
+        thriftUri = System.getenv("HCAT_METASTORE_URI");
+
+        if (thriftUri != null) {
+            LOG.info("Using URI {}", thriftUri);
+
+            hiveConf.set("hive.metastore.local", "false");
+            hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUri);
         }
-      }
+
+        fs = new LocalFileSystem();
+        fs.initialize(fs.getWorkingDirectory().toUri(), new Configuration());
+
+        initialize();
+
+        client = new HiveMetaStoreClient(hiveConf, null);
+        initTable();
     }
-  }
-
-  public static class MapRead extends
-  Mapper<WritableComparable, HCatRecord, BytesWritable, Text> {
-
-    static int readCount = 0; //test will be in local mode
 
     @Override
-    public void map(WritableComparable key, HCatRecord value, Context context
-    ) throws IOException, InterruptedException {
-      {
+    protected void tearDown() throws Exception {
         try {
-          readRecords.add(value);
-          readCount++;
-        } catch(Exception e) {
-          e.printStackTrace(); //print since otherwise exception is lost
-          throw new IOException(e);
+            String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
+
+            client.dropTable(databaseName, tableName);
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
         }
-      }
-    }
-  }
 
-  Job runMRCreate(Map<String, String> partitionValues,
-        List<HCatFieldSchema> partitionColumns, List<HCatRecord> records,
-        int writeCount, boolean assertWrite) throws Exception {
-
-    writeRecords = records;
-    MapCreate.writeCount = 0;
-
-    Configuration conf = new Configuration();
-    Job job = new Job(conf, "hcat mapreduce write test");
-    job.setJarByClass(this.getClass());
-    job.setMapperClass(HCatMapReduceTest.MapCreate.class);
-
-    // input/output settings
-    job.setInputFormatClass(TextInputFormat.class);
-
-    Path path = new Path(fs.getWorkingDirectory(), "mapred/testHCatMapReduceInput");
-    createInputFile(path, writeCount);
-
-    TextInputFormat.setInputPaths(job, path);
-
-    job.setOutputFormatClass(HCatOutputFormat.class);
-
-    OutputJobInfo outputJobInfo = OutputJobInfo.create(dbName, tableName, partitionValues);
-    HCatOutputFormat.setOutput(job, outputJobInfo);
-
-    job.setMapOutputKeyClass(BytesWritable.class);
-    job.setMapOutputValueClass(DefaultHCatRecord.class);
-
-    job.setNumReduceTasks(0);
-
-    HCatOutputFormat.setSchema(job, new HCatSchema(partitionColumns));
-
-    boolean success = job.waitForCompletion(true);
-
-    // Ensure counters are set when data has actually been read.
-    if (partitionValues != null) {
-      assertTrue(job.getCounters().getGroup("FileSystemCounters")
-          .findCounter("FILE_BYTES_READ").getValue() > 0);
+        client.close();
     }
 
-    if (!HcatTestUtils.isHadoop23()) {
-        // Local mode outputcommitter hook is not invoked in Hadoop 1.x
-        if (success) {
-            new FileOutputCommitterContainer(job,null).commitJob(job);
-        } else {
-            new FileOutputCommitterContainer(job,null).abortJob(job, JobStatus.State.FAILED);
+
+    private void initTable() throws Exception {
+
+        String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
+
+        try {
+            client.dropTable(databaseName, tableName);
+        } catch (Exception e) {
+        } //can fail with NoSuchObjectException
+
+
+        Table tbl = new Table();
+        tbl.setDbName(databaseName);
+        tbl.setTableName(tableName);
+        tbl.setTableType("MANAGED_TABLE");
+        StorageDescriptor sd = new StorageDescriptor();
+
+        sd.setCols(getTableColumns());
+        tbl.setPartitionKeys(getPartitionKeys());
+
+        tbl.setSd(sd);
+
+        sd.setBucketCols(new ArrayList<String>(2));
+        sd.setSerdeInfo(new SerDeInfo());
+        sd.getSerdeInfo().setName(tbl.getTableName());
+        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+        sd.getSerdeInfo().getParameters().put(
+                org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+        sd.getSerdeInfo().setSerializationLib(serdeClass);
+        sd.setInputFormat(inputFormat);
+        sd.setOutputFormat(outputFormat);
+
+        Map<String, String> tableParams = new HashMap<String, String>();
+        tbl.setParameters(tableParams);
+
+        client.createTable(tbl);
+    }
+
+    //Create test input file with specified number of rows
+    private void createInputFile(Path path, int rowCount) throws IOException {
+
+        if (fs.exists(path)) {
+            fs.delete(path, true);
+        }
+
+        FSDataOutputStream os = fs.create(path);
+
+        for (int i = 0; i < rowCount; i++) {
+            os.writeChars(i + "\n");
+        }
+
+        os.close();
+    }
+
+    public static class MapCreate extends
+            Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
+
+        static int writeCount = 0; //test will be in local mode
+
+        @Override
+        public void map(LongWritable key, Text value, Context context
+        ) throws IOException, InterruptedException {
+            {
+                try {
+                    HCatRecord rec = writeRecords.get(writeCount);
+                    context.write(null, rec);
+                    writeCount++;
+
+                } catch (Exception e) {
+
+                    e.printStackTrace(System.err); //print since otherwise exception is lost
+                    throw new IOException(e);
+                }
+            }
         }
     }
-    if (assertWrite){
-      // we assert only if we expected to assert with this call.
-      Assert.assertEquals(writeCount, MapCreate.writeCount);
+
+    public static class MapRead extends
+            Mapper<WritableComparable, HCatRecord, BytesWritable, Text> {
+
+        static int readCount = 0; //test will be in local mode
+
+        @Override
+        public void map(WritableComparable key, HCatRecord value, Context context
+        ) throws IOException, InterruptedException {
+            {
+                try {
+                    readRecords.add(value);
+                    readCount++;
+                } catch (Exception e) {
+                    e.printStackTrace(); //print since otherwise exception is lost
+                    throw new IOException(e);
+                }
+            }
+        }
     }
 
-    return job;
-  }
+    Job runMRCreate(Map<String, String> partitionValues,
+                    List<HCatFieldSchema> partitionColumns, List<HCatRecord> records,
+                    int writeCount, boolean assertWrite) throws Exception {
 
-  List<HCatRecord> runMRRead(int readCount) throws Exception {
-    return runMRRead(readCount, null);
-  }
+        writeRecords = records;
+        MapCreate.writeCount = 0;
 
-  List<HCatRecord> runMRRead(int readCount, String filter) throws Exception {
+        Configuration conf = new Configuration();
+        Job job = new Job(conf, "hcat mapreduce write test");
+        job.setJarByClass(this.getClass());
+        job.setMapperClass(HCatMapReduceTest.MapCreate.class);
 
-    MapRead.readCount = 0;
-    readRecords.clear();
+        // input/output settings
+        job.setInputFormatClass(TextInputFormat.class);
 
-    Configuration conf = new Configuration();
-    Job job = new Job(conf, "hcat mapreduce read test");
-    job.setJarByClass(this.getClass());
-    job.setMapperClass(HCatMapReduceTest.MapRead.class);
+        Path path = new Path(fs.getWorkingDirectory(), "mapred/testHCatMapReduceInput");
+        createInputFile(path, writeCount);
 
-    // input/output settings
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
+        TextInputFormat.setInputPaths(job, path);
 
-    InputJobInfo inputJobInfo = InputJobInfo.create(dbName,tableName,filter);
-    HCatInputFormat.setInput(job, inputJobInfo);
+        job.setOutputFormatClass(HCatOutputFormat.class);
 
-    job.setMapOutputKeyClass(BytesWritable.class);
-    job.setMapOutputValueClass(Text.class);
+        OutputJobInfo outputJobInfo = OutputJobInfo.create(dbName, tableName, partitionValues);
+        HCatOutputFormat.setOutput(job, outputJobInfo);
 
-    job.setNumReduceTasks(0);
+        job.setMapOutputKeyClass(BytesWritable.class);
+        job.setMapOutputValueClass(DefaultHCatRecord.class);
 
-    Path path = new Path(fs.getWorkingDirectory(), "mapred/testHCatMapReduceOutput");
-    if( fs.exists(path) ) {
-      fs.delete(path, true);
+        job.setNumReduceTasks(0);
+
+        HCatOutputFormat.setSchema(job, new HCatSchema(partitionColumns));
+
+        boolean success = job.waitForCompletion(true);
+
+        // Ensure counters are set when data has actually been read.
+        if (partitionValues != null) {
+            assertTrue(job.getCounters().getGroup("FileSystemCounters")
+                    .findCounter("FILE_BYTES_READ").getValue() > 0);
+        }
+
+        if (!HcatTestUtils.isHadoop23()) {
+            // Local mode outputcommitter hook is not invoked in Hadoop 1.x
+            if (success) {
+                new FileOutputCommitterContainer(job, null).commitJob(job);
+            } else {
+                new FileOutputCommitterContainer(job, null).abortJob(job, JobStatus.State.FAILED);
+            }
+        }
+        if (assertWrite) {
+            // we assert only if we expected to assert with this call.
+            Assert.assertEquals(writeCount, MapCreate.writeCount);
+        }
+
+        return job;
     }
 
-    TextOutputFormat.setOutputPath(job, path);
+    List<HCatRecord> runMRRead(int readCount) throws Exception {
+        return runMRRead(readCount, null);
+    }
 
-    job.waitForCompletion(true);
-    Assert.assertEquals(readCount, MapRead.readCount);
+    List<HCatRecord> runMRRead(int readCount, String filter) throws Exception {
 
-    return readRecords;
-  }
+        MapRead.readCount = 0;
+        readRecords.clear();
+
+        Configuration conf = new Configuration();
+        Job job = new Job(conf, "hcat mapreduce read test");
+        job.setJarByClass(this.getClass());
+        job.setMapperClass(HCatMapReduceTest.MapRead.class);
+
+        // input/output settings
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
+
+        InputJobInfo inputJobInfo = InputJobInfo.create(dbName, tableName, filter);
+        HCatInputFormat.setInput(job, inputJobInfo);
+
+        job.setMapOutputKeyClass(BytesWritable.class);
+        job.setMapOutputValueClass(Text.class);
+
+        job.setNumReduceTasks(0);
+
+        Path path = new Path(fs.getWorkingDirectory(), "mapred/testHCatMapReduceOutput");
+        if (fs.exists(path)) {
+            fs.delete(path, true);
+        }
+
+        TextOutputFormat.setOutputPath(job, path);
+
+        job.waitForCompletion(true);
+        Assert.assertEquals(readCount, MapRead.readCount);
+
+        return readRecords;
+    }
 
 
-  protected HCatSchema getTableSchema() throws Exception {
+    protected HCatSchema getTableSchema() throws Exception {
 
-    Configuration conf = new Configuration();
-    Job job = new Job(conf, "hcat mapreduce read schema test");
-    job.setJarByClass(this.getClass());
+        Configuration conf = new Configuration();
+        Job job = new Job(conf, "hcat mapreduce read schema test");
+        job.setJarByClass(this.getClass());
 
-    // input/output settings
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
+        // input/output settings
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
 
-    InputJobInfo inputJobInfo = InputJobInfo.create(dbName,tableName,null);
-    HCatInputFormat.setInput(job, inputJobInfo);
+        InputJobInfo inputJobInfo = InputJobInfo.create(dbName, tableName, null);
+        HCatInputFormat.setInput(job, inputJobInfo);
 
-    return HCatInputFormat.getTableSchema(job);
-  }
+        return HCatInputFormat.getTableSchema(job);
+    }
 
 }
 
diff --git a/src/test/org/apache/hcatalog/mapreduce/TestHCatDynamicPartitioned.java b/src/test/org/apache/hcatalog/mapreduce/TestHCatDynamicPartitioned.java
index 77d59eb..c9b8ac5 100644
--- a/src/test/org/apache/hcatalog/mapreduce/TestHCatDynamicPartitioned.java
+++ b/src/test/org/apache/hcatalog/mapreduce/TestHCatDynamicPartitioned.java
@@ -39,124 +39,124 @@
 
 public class TestHCatDynamicPartitioned extends HCatMapReduceTest {
 
-  private List<HCatRecord> writeRecords;
-  private List<HCatFieldSchema> dataColumns;
-  private static final Logger LOG = LoggerFactory.getLogger(TestHCatDynamicPartitioned.class);
+    private List<HCatRecord> writeRecords;
+    private List<HCatFieldSchema> dataColumns;
+    private static final Logger LOG = LoggerFactory.getLogger(TestHCatDynamicPartitioned.class);
 
-  @Override
-  protected void initialize() throws Exception {
+    @Override
+    protected void initialize() throws Exception {
 
-    tableName = "testHCatDynamicPartitionedTable";
-    generateWriteRecords(20,5,0);
-    generateDataColumns();
-  }
-
-  private void generateDataColumns() throws HCatException {
-    dataColumns = new ArrayList<HCatFieldSchema>();
-    dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
-    dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("p1", Constants.STRING_TYPE_NAME, "")));
-  }
-
-  private void generateWriteRecords(int max, int mod,int offset) {
-    writeRecords = new ArrayList<HCatRecord>();
-
-    for(int i = 0;i < max;i++) {
-      List<Object> objList = new ArrayList<Object>();
-
-      objList.add(i);
-      objList.add("strvalue" + i);
-      objList.add(String.valueOf((i % mod)+offset));
-      writeRecords.add(new DefaultHCatRecord(objList));
-    }
-  }
-
-  @Override
-  protected List<FieldSchema> getPartitionKeys() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    fields.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
-    return fields;
-  }
-
-  @Override
-  protected List<FieldSchema> getTableColumns() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
-    fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
-    return fields;
-  }
-
-
-  public void testHCatDynamicPartitionedTable() throws Exception {
-
-    generateWriteRecords(20,5,0);
-    runMRCreate(null, dataColumns, writeRecords, 20,true);
-
-    runMRRead(20);
-
-    //Read with partition filter
-    runMRRead(4, "p1 = \"0\"");
-    runMRRead(8, "p1 = \"1\" or p1 = \"3\"");
-    runMRRead(4, "p1 = \"4\"");
-
-    // read from hive to test
-
-    String query = "select * from " + tableName;
-    int retCode = driver.run(query).getResponseCode();
-
-    if( retCode != 0 ) {
-      throw new Exception("Error " + retCode + " running query " + query);
+        tableName = "testHCatDynamicPartitionedTable";
+        generateWriteRecords(20, 5, 0);
+        generateDataColumns();
     }
 
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-    assertEquals(20, res.size());
-
-
-    //Test for duplicate publish
-    IOException exc = null;
-    try {
-      generateWriteRecords(20,5,0);
-      Job job = runMRCreate(null, dataColumns, writeRecords, 20,false);
-      if (HcatTestUtils.isHadoop23()) {
-          new FileOutputCommitterContainer(job,null).cleanupJob(job);
-      }
-    } catch(IOException e) {
-      exc = e;
+    private void generateDataColumns() throws HCatException {
+        dataColumns = new ArrayList<HCatFieldSchema>();
+        dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
+        dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("p1", Constants.STRING_TYPE_NAME, "")));
     }
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertTrue( "Got exception of type ["+((HCatException) exc).getErrorType().toString()
-        + "] Expected ERROR_PUBLISHING_PARTITION or ERROR_MOVE_FAILED",
-        (ErrorType.ERROR_PUBLISHING_PARTITION == ((HCatException) exc).getErrorType())
-        || (ErrorType.ERROR_MOVE_FAILED == ((HCatException) exc).getErrorType())
+    private void generateWriteRecords(int max, int mod, int offset) {
+        writeRecords = new ArrayList<HCatRecord>();
+
+        for (int i = 0; i < max; i++) {
+            List<Object> objList = new ArrayList<Object>();
+
+            objList.add(i);
+            objList.add("strvalue" + i);
+            objList.add(String.valueOf((i % mod) + offset));
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
+    }
+
+    @Override
+    protected List<FieldSchema> getPartitionKeys() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        fields.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
+        return fields;
+    }
+
+    @Override
+    protected List<FieldSchema> getTableColumns() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
+        fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
+        return fields;
+    }
+
+
+    public void testHCatDynamicPartitionedTable() throws Exception {
+
+        generateWriteRecords(20, 5, 0);
+        runMRCreate(null, dataColumns, writeRecords, 20, true);
+
+        runMRRead(20);
+
+        //Read with partition filter
+        runMRRead(4, "p1 = \"0\"");
+        runMRRead(8, "p1 = \"1\" or p1 = \"3\"");
+        runMRRead(4, "p1 = \"4\"");
+
+        // read from hive to test
+
+        String query = "select * from " + tableName;
+        int retCode = driver.run(query).getResponseCode();
+
+        if (retCode != 0) {
+            throw new Exception("Error " + retCode + " running query " + query);
+        }
+
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        assertEquals(20, res.size());
+
+
+        //Test for duplicate publish
+        IOException exc = null;
+        try {
+            generateWriteRecords(20, 5, 0);
+            Job job = runMRCreate(null, dataColumns, writeRecords, 20, false);
+            if (HcatTestUtils.isHadoop23()) {
+                new FileOutputCommitterContainer(job, null).cleanupJob(job);
+            }
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertTrue("Got exception of type [" + ((HCatException) exc).getErrorType().toString()
+                + "] Expected ERROR_PUBLISHING_PARTITION or ERROR_MOVE_FAILED",
+                (ErrorType.ERROR_PUBLISHING_PARTITION == ((HCatException) exc).getErrorType())
+                        || (ErrorType.ERROR_MOVE_FAILED == ((HCatException) exc).getErrorType())
         );
-  }
+    }
 
-//TODO 1.0 miniCluster is slow this test times out, make it work
+    //TODO 1.0 miniCluster is slow this test times out, make it work
 // renaming test to make test framework skip it
-  public void _testHCatDynamicPartitionMaxPartitions() throws Exception {
-    HiveConf hc = new HiveConf(this.getClass());
+    public void _testHCatDynamicPartitionMaxPartitions() throws Exception {
+        HiveConf hc = new HiveConf(this.getClass());
 
-    int maxParts = hiveConf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS);
-    LOG.info("Max partitions allowed = {}", maxParts);
+        int maxParts = hiveConf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS);
+        LOG.info("Max partitions allowed = {}", maxParts);
 
-    IOException exc = null;
-    try {
-      generateWriteRecords(maxParts+5,maxParts+2,10);
-      runMRCreate(null,dataColumns,writeRecords,maxParts+5,false);
-    } catch(IOException e) {
-      exc = e;
+        IOException exc = null;
+        try {
+            generateWriteRecords(maxParts + 5, maxParts + 2, 10);
+            runMRCreate(null, dataColumns, writeRecords, maxParts + 5, false);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        if (HCatConstants.HCAT_IS_DYNAMIC_MAX_PTN_CHECK_ENABLED) {
+            assertTrue(exc != null);
+            assertTrue(exc instanceof HCatException);
+            assertEquals(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS, ((HCatException) exc).getErrorType());
+        } else {
+            assertTrue(exc == null);
+            runMRRead(maxParts + 5);
+        }
     }
-
-    if (HCatConstants.HCAT_IS_DYNAMIC_MAX_PTN_CHECK_ENABLED){
-      assertTrue(exc != null);
-      assertTrue(exc instanceof HCatException);
-      assertEquals(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS, ((HCatException) exc).getErrorType());
-    }else{
-      assertTrue(exc == null);
-      runMRRead(maxParts+5);
-    }
-  }
 }
diff --git a/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java b/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java
index b756024..aa450fc 100644
--- a/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java
+++ b/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java
@@ -35,95 +35,95 @@
 import org.junit.Test;
 
 public class TestHCatHiveCompatibility extends HCatBaseTest {
-  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
+    private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
 
-  @BeforeClass
-  public static void createInputData() throws Exception {
-    int LOOP_SIZE = 11;
-    File file = new File(INPUT_FILE_NAME);
-    file.deleteOnExit();
-    FileWriter writer = new FileWriter(file);
-    for (int i = 0; i < LOOP_SIZE; i++) {
-      writer.write(i + "\t1\n");
-    }
-    writer.close();
-  }
-
-  @Test
-  public void testUnpartedReadWrite() throws Exception{
-
-    driver.run("drop table if exists junit_unparted_noisd");
-    String createTable = "create table junit_unparted_noisd(a int) stored as RCFILE";
-    Assert.assertEquals(0, driver.run(createTable).getResponseCode());
-
-    // assert that the table created has no hcat instrumentation, and that we're still able to read it.
-    Table table = client.getTable("default", "junit_unparted_noisd");
-    Assert.assertTrue(table.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
-
-    PigServer server = new PigServer(ExecType.LOCAL);
-    logAndRegister(server, "A = load '" + INPUT_FILE_NAME + "' as (a:int);");
-    logAndRegister(server, "store A into 'default.junit_unparted_noisd' using org.apache.hcatalog.pig.HCatStorer();");
-    logAndRegister(server, "B = load 'default.junit_unparted_noisd' using " + HCatLoader.class.getName() + "();");
-    Iterator<Tuple> itr= server.openIterator("B");
-
-    int i = 0;
-
-    while(itr.hasNext()){
-      Tuple t = itr.next();
-      Assert.assertEquals(1, t.size());
-      Assert.assertEquals(t.get(0), i);
-      i++;
+    @BeforeClass
+    public static void createInputData() throws Exception {
+        int LOOP_SIZE = 11;
+        File file = new File(INPUT_FILE_NAME);
+        file.deleteOnExit();
+        FileWriter writer = new FileWriter(file);
+        for (int i = 0; i < LOOP_SIZE; i++) {
+            writer.write(i + "\t1\n");
+        }
+        writer.close();
     }
 
-    Assert.assertFalse(itr.hasNext());
-    Assert.assertEquals(11, i);
+    @Test
+    public void testUnpartedReadWrite() throws Exception {
 
-    // assert that the table created still has no hcat instrumentation
-    Table table2 = client.getTable("default", "junit_unparted_noisd");
-    Assert.assertTrue(table2.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
+        driver.run("drop table if exists junit_unparted_noisd");
+        String createTable = "create table junit_unparted_noisd(a int) stored as RCFILE";
+        Assert.assertEquals(0, driver.run(createTable).getResponseCode());
 
-    driver.run("drop table junit_unparted_noisd");
-  }
+        // assert that the table created has no hcat instrumentation, and that we're still able to read it.
+        Table table = client.getTable("default", "junit_unparted_noisd");
+        Assert.assertTrue(table.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
 
-  @Test
-  public void testPartedRead() throws Exception{
+        PigServer server = new PigServer(ExecType.LOCAL);
+        logAndRegister(server, "A = load '" + INPUT_FILE_NAME + "' as (a:int);");
+        logAndRegister(server, "store A into 'default.junit_unparted_noisd' using org.apache.hcatalog.pig.HCatStorer();");
+        logAndRegister(server, "B = load 'default.junit_unparted_noisd' using " + HCatLoader.class.getName() + "();");
+        Iterator<Tuple> itr = server.openIterator("B");
 
-    driver.run("drop table if exists junit_parted_noisd");
-    String createTable = "create table junit_parted_noisd(a int) partitioned by (b string) stored as RCFILE";
-    Assert.assertEquals(0, driver.run(createTable).getResponseCode());
+        int i = 0;
 
-    // assert that the table created has no hcat instrumentation, and that we're still able to read it.
-    Table table = client.getTable("default", "junit_parted_noisd");
-    Assert.assertTrue(table.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
+        while (itr.hasNext()) {
+            Tuple t = itr.next();
+            Assert.assertEquals(1, t.size());
+            Assert.assertEquals(t.get(0), i);
+            i++;
+        }
 
-    PigServer server = new PigServer(ExecType.LOCAL);
-    logAndRegister(server, "A = load '" + INPUT_FILE_NAME + "' as (a:int);");
-    logAndRegister(server, "store A into 'default.junit_parted_noisd' using org.apache.hcatalog.pig.HCatStorer('b=42');");
-    logAndRegister(server, "B = load 'default.junit_parted_noisd' using " + HCatLoader.class.getName() + "();");
-    Iterator<Tuple> itr= server.openIterator("B");
+        Assert.assertFalse(itr.hasNext());
+        Assert.assertEquals(11, i);
 
-    int i = 0;
+        // assert that the table created still has no hcat instrumentation
+        Table table2 = client.getTable("default", "junit_unparted_noisd");
+        Assert.assertTrue(table2.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
 
-    while(itr.hasNext()){
-      Tuple t = itr.next();
-      Assert.assertEquals(2, t.size()); // Contains explicit field "a" and partition "b".
-      Assert.assertEquals(t.get(0), i);
-      Assert.assertEquals(t.get(1), "42");
-      i++;
+        driver.run("drop table junit_unparted_noisd");
     }
 
-    Assert.assertFalse(itr.hasNext());
-    Assert.assertEquals(11, i);
+    @Test
+    public void testPartedRead() throws Exception {
 
-    // assert that the table created still has no hcat instrumentation
-    Table table2 = client.getTable("default", "junit_parted_noisd");
-    Assert.assertTrue(table2.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
+        driver.run("drop table if exists junit_parted_noisd");
+        String createTable = "create table junit_parted_noisd(a int) partitioned by (b string) stored as RCFILE";
+        Assert.assertEquals(0, driver.run(createTable).getResponseCode());
 
-    // assert that there is one partition present, and it had hcat instrumentation inserted when it was created.
-    Partition ptn = client.getPartition("default", "junit_parted_noisd", Arrays.asList("42"));
+        // assert that the table created has no hcat instrumentation, and that we're still able to read it.
+        Table table = client.getTable("default", "junit_parted_noisd");
+        Assert.assertTrue(table.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
 
-    Assert.assertNotNull(ptn);
-    Assert.assertTrue(ptn.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
-    driver.run("drop table junit_unparted_noisd");
-  }
+        PigServer server = new PigServer(ExecType.LOCAL);
+        logAndRegister(server, "A = load '" + INPUT_FILE_NAME + "' as (a:int);");
+        logAndRegister(server, "store A into 'default.junit_parted_noisd' using org.apache.hcatalog.pig.HCatStorer('b=42');");
+        logAndRegister(server, "B = load 'default.junit_parted_noisd' using " + HCatLoader.class.getName() + "();");
+        Iterator<Tuple> itr = server.openIterator("B");
+
+        int i = 0;
+
+        while (itr.hasNext()) {
+            Tuple t = itr.next();
+            Assert.assertEquals(2, t.size()); // Contains explicit field "a" and partition "b".
+            Assert.assertEquals(t.get(0), i);
+            Assert.assertEquals(t.get(1), "42");
+            i++;
+        }
+
+        Assert.assertFalse(itr.hasNext());
+        Assert.assertEquals(11, i);
+
+        // assert that the table created still has no hcat instrumentation
+        Table table2 = client.getTable("default", "junit_parted_noisd");
+        Assert.assertTrue(table2.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
+
+        // assert that there is one partition present, and it had hcat instrumentation inserted when it was created.
+        Partition ptn = client.getPartition("default", "junit_parted_noisd", Arrays.asList("42"));
+
+        Assert.assertNotNull(ptn);
+        Assert.assertTrue(ptn.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
+        driver.run("drop table junit_unparted_noisd");
+    }
 }
diff --git a/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java b/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java
index fd70188..62dcb7c 100644
--- a/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java
+++ b/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java
@@ -39,77 +39,77 @@
 
 public class TestHCatHiveThriftCompatibility extends HCatBaseTest {
 
-  private boolean setUpComplete = false;
-  private Path intStringSeq;
+    private boolean setUpComplete = false;
+    private Path intStringSeq;
 
-  @Before
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    if (setUpComplete) {
-      return;
+    @Before
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        if (setUpComplete) {
+            return;
+        }
+
+        ByteArrayOutputStream out = new ByteArrayOutputStream();
+        TIOStreamTransport transport = new TIOStreamTransport(out);
+        TBinaryProtocol protocol = new TBinaryProtocol(transport);
+
+        IntString intString = new IntString(1, "one", 1);
+        intString.write(protocol);
+        BytesWritable bytesWritable = new BytesWritable(out.toByteArray());
+
+        intStringSeq = new Path(TEST_DATA_DIR + "/data/intString.seq");
+        LOG.info("Creating data file: " + intStringSeq);
+
+        SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(
+                intStringSeq.getFileSystem(hiveConf), hiveConf, intStringSeq,
+                NullWritable.class, BytesWritable.class);
+        seqFileWriter.append(NullWritable.get(), bytesWritable);
+        seqFileWriter.close();
+
+        setUpComplete = true;
     }
 
-    ByteArrayOutputStream out = new ByteArrayOutputStream();
-    TIOStreamTransport transport = new TIOStreamTransport(out);
-    TBinaryProtocol protocol = new TBinaryProtocol(transport);
+    /**
+     *  Create a table with no explicit schema and ensure it's correctly
+     *  discovered from the thrift struct.
+     */
+    @Test
+    public void testDynamicCols() throws Exception {
+        Assert.assertEquals(0, driver.run("drop table if exists test_thrift").getResponseCode());
+        Assert.assertEquals(0, driver.run(
+                "create external table test_thrift " +
+                        "partitioned by (year string) " +
+                        "row format serde 'org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer' " +
+                        "with serdeproperties ( " +
+                        "  'serialization.class'='org.apache.hadoop.hive.serde2.thrift.test.IntString', " +
+                        "  'serialization.format'='org.apache.thrift.protocol.TBinaryProtocol') " +
+                        "stored as" +
+                        "  inputformat 'org.apache.hadoop.mapred.SequenceFileInputFormat'" +
+                        "  outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'")
+                .getResponseCode());
+        Assert.assertEquals(0,
+                driver.run("alter table test_thrift add partition (year = '2012') location '" +
+                        intStringSeq.getParent() + "'").getResponseCode());
 
-    IntString intString = new IntString(1, "one", 1);
-    intString.write(protocol);
-    BytesWritable bytesWritable = new BytesWritable(out.toByteArray());
+        PigServer pigServer = new PigServer(ExecType.LOCAL);
+        pigServer.registerQuery("A = load 'test_thrift' using org.apache.hcatalog.pig.HCatLoader();");
 
-    intStringSeq = new Path(TEST_DATA_DIR + "/data/intString.seq");
-    LOG.info("Creating data file: " + intStringSeq);
+        Schema expectedSchema = new Schema();
+        expectedSchema.add(new Schema.FieldSchema("myint", DataType.INTEGER));
+        expectedSchema.add(new Schema.FieldSchema("mystring", DataType.CHARARRAY));
+        expectedSchema.add(new Schema.FieldSchema("underscore_int", DataType.INTEGER));
+        expectedSchema.add(new Schema.FieldSchema("year", DataType.CHARARRAY));
 
-    SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(
-        intStringSeq.getFileSystem(hiveConf), hiveConf, intStringSeq,
-        NullWritable.class, BytesWritable.class);
-    seqFileWriter.append(NullWritable.get(), bytesWritable);
-    seqFileWriter.close();
+        Assert.assertEquals(expectedSchema, pigServer.dumpSchema("A"));
 
-    setUpComplete = true;
-  }
+        Iterator<Tuple> iterator = pigServer.openIterator("A");
+        Tuple t = iterator.next();
+        Assert.assertEquals(1, t.get(0));
+        Assert.assertEquals("one", t.get(1));
+        Assert.assertEquals(1, t.get(2));
+        Assert.assertEquals("2012", t.get(3));
 
-  /**
-   *  Create a table with no explicit schema and ensure its correctly
-   *  discovered from the thrift struct.
-   */
-  @Test
-  public void testDynamicCols() throws Exception {
-    Assert.assertEquals(0, driver.run("drop table if exists test_thrift").getResponseCode());
-    Assert.assertEquals(0, driver.run(
-        "create external table test_thrift " +
-            "partitioned by (year string) " +
-            "row format serde 'org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer' " +
-            "with serdeproperties ( " +
-            "  'serialization.class'='org.apache.hadoop.hive.serde2.thrift.test.IntString', " +
-            "  'serialization.format'='org.apache.thrift.protocol.TBinaryProtocol') " +
-            "stored as" +
-            "  inputformat 'org.apache.hadoop.mapred.SequenceFileInputFormat'" +
-            "  outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'")
-        .getResponseCode());
-    Assert.assertEquals(0,
-        driver.run("alter table test_thrift add partition (year = '2012') location '" +
-            intStringSeq.getParent() + "'").getResponseCode());
-
-    PigServer pigServer = new PigServer(ExecType.LOCAL);
-    pigServer.registerQuery("A = load 'test_thrift' using org.apache.hcatalog.pig.HCatLoader();");
-
-    Schema expectedSchema = new Schema();
-    expectedSchema.add(new Schema.FieldSchema("myint", DataType.INTEGER));
-    expectedSchema.add(new Schema.FieldSchema("mystring", DataType.CHARARRAY));
-    expectedSchema.add(new Schema.FieldSchema("underscore_int", DataType.INTEGER));
-    expectedSchema.add(new Schema.FieldSchema("year", DataType.CHARARRAY));
-
-    Assert.assertEquals(expectedSchema, pigServer.dumpSchema("A"));
-
-    Iterator<Tuple> iterator = pigServer.openIterator("A");
-    Tuple t = iterator.next();
-    Assert.assertEquals(1, t.get(0));
-    Assert.assertEquals("one", t.get(1));
-    Assert.assertEquals(1, t.get(2));
-    Assert.assertEquals("2012", t.get(3));
-
-    Assert.assertFalse(iterator.hasNext());
-  }
+        Assert.assertFalse(iterator.hasNext());
+    }
 }
diff --git a/src/test/org/apache/hcatalog/mapreduce/TestHCatInputFormat.java b/src/test/org/apache/hcatalog/mapreduce/TestHCatInputFormat.java
index 33c61ac..a6b381e 100644
--- a/src/test/org/apache/hcatalog/mapreduce/TestHCatInputFormat.java
+++ b/src/test/org/apache/hcatalog/mapreduce/TestHCatInputFormat.java
@@ -41,107 +41,107 @@
 
 public class TestHCatInputFormat extends HCatBaseTest {
 
-  private boolean setUpComplete = false;
+    private boolean setUpComplete = false;
 
-  /**
-   * Create an input sequence file with 100 records; every 10th record is bad.
-   * Load this table into Hive.
-   */
-  @Before
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    if (setUpComplete) {
-      return;
-    }
-
-    Path intStringSeq = new Path(TEST_DATA_DIR + "/data/intString.seq");
-    LOG.info("Creating data file: " + intStringSeq);
-    SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(
-        intStringSeq.getFileSystem(hiveConf), hiveConf, intStringSeq,
-        NullWritable.class, BytesWritable.class);
-
-    ByteArrayOutputStream out = new ByteArrayOutputStream();
-    TIOStreamTransport transport = new TIOStreamTransport(out);
-    TBinaryProtocol protocol = new TBinaryProtocol(transport);
-
-    for (int i = 1; i <= 100; i++) {
-      if (i % 10 == 0) {
-        seqFileWriter.append(NullWritable.get(), new BytesWritable("bad record".getBytes()));
-      } else {
-        out.reset();
-        IntString intString = new IntString(i, Integer.toString(i), i);
-        intString.write(protocol);
-        BytesWritable bytesWritable = new BytesWritable(out.toByteArray());
-        seqFileWriter.append(NullWritable.get(), bytesWritable);
-      }
-    }
-
-    seqFileWriter.close();
-
-    // Now let's load this file into a new Hive table.
-    Assert.assertEquals(0, driver.run("drop table if exists test_bad_records").getResponseCode());
-    Assert.assertEquals(0, driver.run(
-        "create table test_bad_records " +
-            "row format serde 'org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer' " +
-            "with serdeproperties ( " +
-            "  'serialization.class'='org.apache.hadoop.hive.serde2.thrift.test.IntString', " +
-            "  'serialization.format'='org.apache.thrift.protocol.TBinaryProtocol') " +
-            "stored as" +
-            "  inputformat 'org.apache.hadoop.mapred.SequenceFileInputFormat'" +
-            "  outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'")
-        .getResponseCode());
-    Assert.assertEquals(0, driver.run("load data local inpath '" + intStringSeq.getParent() +
-        "' into table test_bad_records").getResponseCode());
-
-    setUpComplete = true;
-  }
-
-  @Test
-  public void testBadRecordHandlingPasses() throws Exception {
-    Assert.assertTrue(runJob(0.1f));
-  }
-
-  @Test
-  public void testBadRecordHandlingFails() throws Exception {
-    Assert.assertFalse(runJob(0.01f));
-  }
-
-  private boolean runJob(float badRecordThreshold) throws Exception {
-    Configuration conf = new Configuration();
-
-    conf.setFloat(HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY, badRecordThreshold);
-
-    Job job = new Job(conf);
-    job.setJarByClass(this.getClass());
-    job.setMapperClass(MyMapper.class);
-
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
-
-    HCatInputFormat.setInput(job, InputJobInfo.create("default", "test_bad_records", null));
-
-    job.setMapOutputKeyClass(HCatRecord.class);
-    job.setMapOutputValueClass(HCatRecord.class);
-
-    job.setNumReduceTasks(0);
-
-    Path path = new Path(TEST_DATA_DIR, "test_bad_record_handling_output");
-    if (path.getFileSystem(conf).exists(path)) {
-      path.getFileSystem(conf).delete(path, true);
-    }
-
-    TextOutputFormat.setOutputPath(job, path);
-
-    return job.waitForCompletion(true);
-  }
-
-  public static class MyMapper extends Mapper<NullWritable, HCatRecord, NullWritable, Text> {
+    /**
+     * Create an input sequence file with 100 records; every 10th record is bad.
+     * Load this table into Hive.
+     */
+    @Before
     @Override
-    public void map(NullWritable key, HCatRecord value, Context context)
-        throws IOException, InterruptedException {
-      LOG.info("HCatRecord: " + value);
-      context.write(NullWritable.get(), new Text(value.toString()));
+    public void setUp() throws Exception {
+        super.setUp();
+        if (setUpComplete) {
+            return;
+        }
+
+        Path intStringSeq = new Path(TEST_DATA_DIR + "/data/intString.seq");
+        LOG.info("Creating data file: " + intStringSeq);
+        SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(
+            intStringSeq.getFileSystem(hiveConf), hiveConf, intStringSeq,
+            NullWritable.class, BytesWritable.class);
+
+        ByteArrayOutputStream out = new ByteArrayOutputStream();
+        TIOStreamTransport transport = new TIOStreamTransport(out);
+        TBinaryProtocol protocol = new TBinaryProtocol(transport);
+
+        for (int i = 1; i <= 100; i++) {
+            if (i % 10 == 0) {
+                seqFileWriter.append(NullWritable.get(), new BytesWritable("bad record".getBytes()));
+            } else {
+                out.reset();
+                IntString intString = new IntString(i, Integer.toString(i), i);
+                intString.write(protocol);
+                BytesWritable bytesWritable = new BytesWritable(out.toByteArray());
+                seqFileWriter.append(NullWritable.get(), bytesWritable);
+            }
+        }
+
+        seqFileWriter.close();
+
+        // Now let's load this file into a new Hive table.
+        Assert.assertEquals(0, driver.run("drop table if exists test_bad_records").getResponseCode());
+        Assert.assertEquals(0, driver.run(
+            "create table test_bad_records " +
+                "row format serde 'org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer' " +
+                "with serdeproperties ( " +
+                "  'serialization.class'='org.apache.hadoop.hive.serde2.thrift.test.IntString', " +
+                "  'serialization.format'='org.apache.thrift.protocol.TBinaryProtocol') " +
+                "stored as" +
+                "  inputformat 'org.apache.hadoop.mapred.SequenceFileInputFormat'" +
+                "  outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'")
+            .getResponseCode());
+        Assert.assertEquals(0, driver.run("load data local inpath '" + intStringSeq.getParent() +
+            "' into table test_bad_records").getResponseCode());
+
+        setUpComplete = true;
     }
-  }
+
+    @Test
+    public void testBadRecordHandlingPasses() throws Exception {
+        Assert.assertTrue(runJob(0.1f));
+    }
+
+    @Test
+    public void testBadRecordHandlingFails() throws Exception {
+        Assert.assertFalse(runJob(0.01f));
+    }
+
+    private boolean runJob(float badRecordThreshold) throws Exception {
+        Configuration conf = new Configuration();
+
+        conf.setFloat(HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY, badRecordThreshold);
+
+        Job job = new Job(conf);
+        job.setJarByClass(this.getClass());
+        job.setMapperClass(MyMapper.class);
+
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
+
+        HCatInputFormat.setInput(job, InputJobInfo.create("default", "test_bad_records", null));
+
+        job.setMapOutputKeyClass(HCatRecord.class);
+        job.setMapOutputValueClass(HCatRecord.class);
+
+        job.setNumReduceTasks(0);
+
+        Path path = new Path(TEST_DATA_DIR, "test_bad_record_handling_output");
+        if (path.getFileSystem(conf).exists(path)) {
+            path.getFileSystem(conf).delete(path, true);
+        }
+
+        TextOutputFormat.setOutputPath(job, path);
+
+        return job.waitForCompletion(true);
+    }
+
+    public static class MyMapper extends Mapper<NullWritable, HCatRecord, NullWritable, Text> {
+        @Override
+        public void map(NullWritable key, HCatRecord value, Context context)
+            throws IOException, InterruptedException {
+            LOG.info("HCatRecord: " + value);
+            context.write(NullWritable.get(), new Text(value.toString()));
+        }
+    }
 }
diff --git a/src/test/org/apache/hcatalog/mapreduce/TestHCatMultiOutputFormat.java b/src/test/org/apache/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
index 1d005ab..597ab94 100644
--- a/src/test/org/apache/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
+++ b/src/test/org/apache/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
@@ -100,8 +100,8 @@
         public void run() {
             try {
                 String warehouseConf = HiveConf.ConfVars.METASTOREWAREHOUSE.varname + "="
-                        + warehousedir.toString();
-                HiveMetaStore.main(new String[] {"-v", "-p", msPort, "--hiveconf", warehouseConf});
+                    + warehousedir.toString();
+                HiveMetaStore.main(new String[]{"-v", "-p", msPort, "--hiveconf", warehouseConf});
             } catch (Throwable t) {
                 System.err.println("Exiting. Got exception from metastore: " + t.getMessage());
             }
@@ -177,7 +177,7 @@
         FileSystem fs = FileSystem.get(conf);
         System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
         mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
-                new JobConf(conf));
+            new JobConf(conf));
         mrConf = mrCluster.createJobConf();
         fs.mkdirs(warehousedir);
 
@@ -192,7 +192,7 @@
         hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTRETRIES, 3);
 
         hiveConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-                HCatSemanticAnalyzer.class.getName());
+            HCatSemanticAnalyzer.class.getName());
         hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
         hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
         hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
@@ -239,9 +239,9 @@
         sd.setInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat.class.getName());
         sd.setOutputFormat(org.apache.hadoop.hive.ql.io.RCFileOutputFormat.class.getName());
         sd.getSerdeInfo().getParameters().put(
-                org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+            org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
         sd.getSerdeInfo().setSerializationLib(
-                org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName());
+            org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName());
         tbl.setPartitionKeys(ColumnHolder.partitionCols);
 
         hmsc.createTable(tbl);
@@ -291,10 +291,10 @@
 
         for (int i = 0; i < tableNames.length; i++) {
             configurer.addOutputFormat(tableNames[i], HCatOutputFormat.class, BytesWritable.class,
-                    HCatRecord.class);
+                HCatRecord.class);
             HCatOutputFormat.setOutput(configurer.getJob(tableNames[i]), infoList.get(i));
             HCatOutputFormat.setSchema(configurer.getJob(tableNames[i]),
-                    schemaMap.get(tableNames[i]));
+                schemaMap.get(tableNames[i]));
         }
         configurer.configure();
 
@@ -307,26 +307,26 @@
             outputs.add(getTableData(tbl, "default").get(0));
         }
         Assert.assertEquals("Comparing output of table " +
-                tableNames[0] + " is not correct", outputs.get(0), "a,a,1,ag");
+            tableNames[0] + " is not correct", outputs.get(0), "a,a,1,ag");
         Assert.assertEquals("Comparing output of table " +
-                tableNames[1] + " is not correct", outputs.get(1), "a,1,ag");
+            tableNames[1] + " is not correct", outputs.get(1), "a,1,ag");
         Assert.assertEquals("Comparing output of table " +
-                tableNames[2] + " is not correct", outputs.get(2), "a,a,extra,1,ag");
+            tableNames[2] + " is not correct", outputs.get(2), "a,a,extra,1,ag");
 
         // Check permission on partition dirs and files created
         for (int i = 0; i < tableNames.length; i++) {
             Path partitionFile = new Path(warehousedir + "/" + tableNames[i]
-                    + "/ds=1/cluster=ag/part-m-00000");
+                + "/ds=1/cluster=ag/part-m-00000");
             FileSystem fs = partitionFile.getFileSystem(mrConf);
             Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
-                    fs.getFileStatus(partitionFile).getPermission(),
-                    new FsPermission(tablePerms[i]));
+                fs.getFileStatus(partitionFile).getPermission(),
+                new FsPermission(tablePerms[i]));
             Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
-                    fs.getFileStatus(partitionFile.getParent()).getPermission(),
-                    new FsPermission(tablePerms[i]));
+                fs.getFileStatus(partitionFile.getParent()).getPermission(),
+                new FsPermission(tablePerms[i]));
             Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
-                    fs.getFileStatus(partitionFile.getParent().getParent()).getPermission(),
-                    new FsPermission(tablePerms[i]));
+                fs.getFileStatus(partitionFile.getParent().getParent()).getPermission(),
+                new FsPermission(tablePerms[i]));
 
         }
         LOG.info("File permissions verified");
@@ -392,13 +392,13 @@
     }
 
     private static class MyMapper extends
-            Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
+        Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
 
         private int i = 0;
 
         @Override
         protected void map(LongWritable key, Text value, Context context)
-                throws IOException, InterruptedException {
+            throws IOException, InterruptedException {
             HCatRecord record = null;
             String[] splits = value.toString().split(",");
             switch (i) {
diff --git a/src/test/org/apache/hcatalog/mapreduce/TestHCatNonPartitioned.java b/src/test/org/apache/hcatalog/mapreduce/TestHCatNonPartitioned.java
index 3ba64a5..bc2c08d 100644
--- a/src/test/org/apache/hcatalog/mapreduce/TestHCatNonPartitioned.java
+++ b/src/test/org/apache/hcatalog/mapreduce/TestHCatNonPartitioned.java
@@ -35,96 +35,96 @@
 
 public class TestHCatNonPartitioned extends HCatMapReduceTest {
 
-  private List<HCatRecord> writeRecords;
-  List<HCatFieldSchema> partitionColumns;
+    private List<HCatRecord> writeRecords;
+    List<HCatFieldSchema> partitionColumns;
 
-  @Override
-  protected void initialize() throws HCatException {
+    @Override
+    protected void initialize() throws HCatException {
 
-    dbName = null; //test if null dbName works ("default" is used)
-    tableName = "testHCatNonPartitionedTable";
+        dbName = null; //test if null dbName works ("default" is used)
+        tableName = "testHCatNonPartitionedTable";
 
-    writeRecords = new ArrayList<HCatRecord>();
+        writeRecords = new ArrayList<HCatRecord>();
 
-    for(int i = 0;i < 20;i++) {
-      List<Object> objList = new ArrayList<Object>();
+        for (int i = 0; i < 20; i++) {
+            List<Object> objList = new ArrayList<Object>();
 
-      objList.add(i);
-      objList.add("strvalue" + i);
-      writeRecords.add(new DefaultHCatRecord(objList));
+            objList.add(i);
+            objList.add("strvalue" + i);
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
+
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
     }
 
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
-  }
-
-  @Override
-  protected List<FieldSchema> getPartitionKeys() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    //empty list, non partitioned
-    return fields;
-  }
-
-  @Override
-  protected List<FieldSchema> getTableColumns() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
-    fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
-    return fields;
-  }
-
-
-  public void testHCatNonPartitionedTable() throws Exception {
-
-    Map<String, String> partitionMap = new HashMap<String, String>();
-    runMRCreate(null, partitionColumns, writeRecords, 10,true);
-
-    //Test for duplicate publish
-    IOException exc = null;
-    try {
-      runMRCreate(null,  partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
+    @Override
+    protected List<FieldSchema> getPartitionKeys() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        //empty list, non partitioned
+        return fields;
     }
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_NON_EMPTY_TABLE, ((HCatException) exc).getErrorType());
-
-    //Test for publish with invalid partition key name
-    exc = null;
-    partitionMap.clear();
-    partitionMap.put("px", "p1value2");
-
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
+    @Override
+    protected List<FieldSchema> getTableColumns() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
+        fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
+        return fields;
     }
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_INVALID_PARTITION_VALUES, ((HCatException) exc).getErrorType());
 
-    //Read should get 10 rows
-    runMRRead(10);
+    public void testHCatNonPartitionedTable() throws Exception {
 
-    hiveReadTest();
-  }
+        Map<String, String> partitionMap = new HashMap<String, String>();
+        runMRCreate(null, partitionColumns, writeRecords, 10, true);
 
-  //Test that data inserted through hcatoutputformat is readable from hive
-  private void hiveReadTest() throws Exception {
+        //Test for duplicate publish
+        IOException exc = null;
+        try {
+            runMRCreate(null, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
 
-    String query = "select * from " + tableName;
-    int retCode = driver.run(query).getResponseCode();
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_NON_EMPTY_TABLE, ((HCatException) exc).getErrorType());
 
-    if( retCode != 0 ) {
-      throw new Exception("Error " + retCode + " running query " + query);
+        //Test for publish with invalid partition key name
+        exc = null;
+        partitionMap.clear();
+        partitionMap.put("px", "p1value2");
+
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_INVALID_PARTITION_VALUES, ((HCatException) exc).getErrorType());
+
+        //Read should get 10 rows
+        runMRRead(10);
+
+        hiveReadTest();
     }
 
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-    assertEquals(10, res.size());
-  }
+    //Test that data inserted through hcatoutputformat is readable from hive
+    private void hiveReadTest() throws Exception {
+
+        String query = "select * from " + tableName;
+        int retCode = driver.run(query).getResponseCode();
+
+        if (retCode != 0) {
+            throw new Exception("Error " + retCode + " running query " + query);
+        }
+
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        assertEquals(10, res.size());
+    }
 }
diff --git a/src/test/org/apache/hcatalog/mapreduce/TestHCatOutputFormat.java b/src/test/org/apache/hcatalog/mapreduce/TestHCatOutputFormat.java
index 91192f0..1895cc4 100644
--- a/src/test/org/apache/hcatalog/mapreduce/TestHCatOutputFormat.java
+++ b/src/test/org/apache/hcatalog/mapreduce/TestHCatOutputFormat.java
@@ -46,120 +46,122 @@
 
 public class TestHCatOutputFormat extends TestCase {
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestHCatOutputFormat.class);
-  private HiveMetaStoreClient client;
-  private HiveConf hiveConf;
+    private static final Logger LOG = LoggerFactory.getLogger(TestHCatOutputFormat.class);
+    private HiveMetaStoreClient client;
+    private HiveConf hiveConf;
 
-  private static final String dbName = "hcatOutputFormatTestDB";
-  private static final String tblName = "hcatOutputFormatTestTable";
+    private static final String dbName = "hcatOutputFormatTestDB";
+    private static final String tblName = "hcatOutputFormatTestTable";
 
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    hiveConf = new HiveConf(this.getClass());
+    @Override
+    protected void setUp() throws Exception {
+        super.setUp();
+        hiveConf = new HiveConf(this.getClass());
 
-    try {
-      client = new HiveMetaStoreClient(hiveConf, null);
+        try {
+            client = new HiveMetaStoreClient(hiveConf, null);
 
-      initTable();
-    } catch (Throwable e) {
-      LOG.error("Unable to open the metastore", e);
-      throw new Exception(e);
+            initTable();
+        } catch (Throwable e) {
+            LOG.error("Unable to open the metastore", e);
+            throw new Exception(e);
+        }
     }
-  }
 
-  @Override
-  protected void tearDown() throws Exception {
-    try {
-      super.tearDown();
-      client.dropTable(dbName, tblName);
-      client.dropDatabase(dbName);
+    @Override
+    protected void tearDown() throws Exception {
+        try {
+            super.tearDown();
+            client.dropTable(dbName, tblName);
+            client.dropDatabase(dbName);
 
-      client.close();
-    } catch (Throwable e) {
-        LOG.error("Unable to close metastore", e);
-      throw new Exception(e);
+            client.close();
+        } catch (Throwable e) {
+            LOG.error("Unable to close metastore", e);
+            throw new Exception(e);
+        }
     }
-  }
 
-  private void initTable() throws Exception {
+    private void initTable() throws Exception {
 
-    try {
-      client.dropTable(dbName, tblName);
-    } catch(Exception e) {}
-    try {
-      client.dropDatabase(dbName);
-    } catch(Exception e) {}
-    client.createDatabase(new Database(dbName, "", null,null));
-    assertNotNull((client.getDatabase(dbName).getLocationUri()));
+        try {
+            client.dropTable(dbName, tblName);
+        } catch (Exception e) {
+        }
+        try {
+            client.dropDatabase(dbName);
+        } catch (Exception e) {
+        }
+        client.createDatabase(new Database(dbName, "", null, null));
+        assertNotNull((client.getDatabase(dbName).getLocationUri()));
 
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    fields.add(new FieldSchema("colname", Constants.STRING_TYPE_NAME, ""));
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        fields.add(new FieldSchema("colname", Constants.STRING_TYPE_NAME, ""));
 
-    Table tbl = new Table();
-    tbl.setDbName(dbName);
-    tbl.setTableName(tblName);
-    StorageDescriptor sd = new StorageDescriptor();
-    sd.setCols(fields);
-    tbl.setSd(sd);
+        Table tbl = new Table();
+        tbl.setDbName(dbName);
+        tbl.setTableName(tblName);
+        StorageDescriptor sd = new StorageDescriptor();
+        sd.setCols(fields);
+        tbl.setSd(sd);
 
-    //sd.setLocation("hdfs://tmp");
-    sd.setInputFormat(RCFileInputFormat.class.getName());
-    sd.setOutputFormat(RCFileOutputFormat.class.getName());
-    sd.setParameters(new HashMap<String, String>());
-    sd.getParameters().put("test_param_1", "Use this for comments etc");
-    //sd.setBucketCols(new ArrayList<String>(2));
-    //sd.getBucketCols().add("name");
-    sd.setSerdeInfo(new SerDeInfo());
-    sd.getSerdeInfo().setName(tbl.getTableName());
-    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-    sd.getSerdeInfo().getParameters().put(
-        org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
-    sd.getSerdeInfo().setSerializationLib(
-        org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
-    tbl.setPartitionKeys(fields);
+        //sd.setLocation("hdfs://tmp");
+        sd.setInputFormat(RCFileInputFormat.class.getName());
+        sd.setOutputFormat(RCFileOutputFormat.class.getName());
+        sd.setParameters(new HashMap<String, String>());
+        sd.getParameters().put("test_param_1", "Use this for comments etc");
+        //sd.setBucketCols(new ArrayList<String>(2));
+        //sd.getBucketCols().add("name");
+        sd.setSerdeInfo(new SerDeInfo());
+        sd.getSerdeInfo().setName(tbl.getTableName());
+        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+        sd.getSerdeInfo().getParameters().put(
+                org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+        sd.getSerdeInfo().setSerializationLib(
+                org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
+        tbl.setPartitionKeys(fields);
 
-    Map<String, String> tableParams = new HashMap<String, String>();
-    tableParams.put("hcat.testarg", "testArgValue");
+        Map<String, String> tableParams = new HashMap<String, String>();
+        tableParams.put("hcat.testarg", "testArgValue");
 
-    tbl.setParameters(tableParams);
+        tbl.setParameters(tableParams);
 
-    client.createTable(tbl);
-    Path tblPath = new Path(client.getTable(dbName, tblName).getSd().getLocation());
-    assertTrue(tblPath.getFileSystem(hiveConf).mkdirs(new Path(tblPath,"colname=p1")));
+        client.createTable(tbl);
+        Path tblPath = new Path(client.getTable(dbName, tblName).getSd().getLocation());
+        assertTrue(tblPath.getFileSystem(hiveConf).mkdirs(new Path(tblPath, "colname=p1")));
 
-  }
+    }
 
-  public void testSetOutput() throws Exception {
-    Configuration conf = new Configuration();
-    Job job = new Job(conf, "test outputformat");
+    public void testSetOutput() throws Exception {
+        Configuration conf = new Configuration();
+        Job job = new Job(conf, "test outputformat");
 
-    Map<String, String> partitionValues = new HashMap<String, String>();
-    partitionValues.put("colname", "p1");
-    //null server url means local mode
-    OutputJobInfo info = OutputJobInfo.create(dbName, tblName, partitionValues);
+        Map<String, String> partitionValues = new HashMap<String, String>();
+        partitionValues.put("colname", "p1");
+        //null server url means local mode
+        OutputJobInfo info = OutputJobInfo.create(dbName, tblName, partitionValues);
 
-    HCatOutputFormat.setOutput(job, info);
-    OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(job);
+        HCatOutputFormat.setOutput(job, info);
+        OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(job);
 
-    assertNotNull(jobInfo.getTableInfo());
-    assertEquals(1, jobInfo.getPartitionValues().size());
-    assertEquals("p1", jobInfo.getPartitionValues().get("colname"));
-    assertEquals(1, jobInfo.getTableInfo().getDataColumns().getFields().size());
-    assertEquals("colname", jobInfo.getTableInfo().getDataColumns().getFields().get(0).getName());
+        assertNotNull(jobInfo.getTableInfo());
+        assertEquals(1, jobInfo.getPartitionValues().size());
+        assertEquals("p1", jobInfo.getPartitionValues().get("colname"));
+        assertEquals(1, jobInfo.getTableInfo().getDataColumns().getFields().size());
+        assertEquals("colname", jobInfo.getTableInfo().getDataColumns().getFields().get(0).getName());
 
-    publishTest(job);
-  }
+        publishTest(job);
+    }
 
-  public void publishTest(Job job) throws Exception {
-    OutputCommitter committer = new FileOutputCommitterContainer(job,null);
-    committer.cleanupJob(job);
+    public void publishTest(Job job) throws Exception {
+        OutputCommitter committer = new FileOutputCommitterContainer(job, null);
+        committer.cleanupJob(job);
 
-    Partition part = client.getPartition(dbName, tblName, Arrays.asList("p1"));
-    assertNotNull(part);
+        Partition part = client.getPartition(dbName, tblName, Arrays.asList("p1"));
+        assertNotNull(part);
 
-    StorerInfo storer = InternalUtil.extractStorerInfo(part.getSd(),part.getParameters());
-    assertEquals(storer.getProperties().get("hcat.testarg"), "testArgValue");
-    assertTrue(part.getSd().getLocation().indexOf("p1") != -1);
-  }
+        StorerInfo storer = InternalUtil.extractStorerInfo(part.getSd(), part.getParameters());
+        assertEquals(storer.getProperties().get("hcat.testarg"), "testArgValue");
+        assertTrue(part.getSd().getLocation().indexOf("p1") != -1);
+    }
 }
diff --git a/src/test/org/apache/hcatalog/mapreduce/TestHCatPartitioned.java b/src/test/org/apache/hcatalog/mapreduce/TestHCatPartitioned.java
index 42ad9fd..c9561a9 100644
--- a/src/test/org/apache/hcatalog/mapreduce/TestHCatPartitioned.java
+++ b/src/test/org/apache/hcatalog/mapreduce/TestHCatPartitioned.java
@@ -36,309 +36,309 @@
 
 public class TestHCatPartitioned extends HCatMapReduceTest {
 
-  private List<HCatRecord> writeRecords;
-  private List<HCatFieldSchema> partitionColumns;
+    private List<HCatRecord> writeRecords;
+    private List<HCatFieldSchema> partitionColumns;
 
-  @Override
-  protected void initialize() throws Exception {
+    @Override
+    protected void initialize() throws Exception {
 
-    tableName = "testHCatPartitionedTable";
-    writeRecords = new ArrayList<HCatRecord>();
+        tableName = "testHCatPartitionedTable";
+        writeRecords = new ArrayList<HCatRecord>();
 
-    for(int i = 0;i < 20;i++) {
-      List<Object> objList = new ArrayList<Object>();
+        for (int i = 0; i < 20; i++) {
+            List<Object> objList = new ArrayList<Object>();
 
-      objList.add(i);
-      objList.add("strvalue" + i);
-      writeRecords.add(new DefaultHCatRecord(objList));
+            objList.add(i);
+            objList.add("strvalue" + i);
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
+
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
     }
 
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
-  }
 
-
-  @Override
-  protected List<FieldSchema> getPartitionKeys() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    //Defining partition names in unsorted order
-    fields.add(new FieldSchema("PaRT1", Constants.STRING_TYPE_NAME, ""));
-    fields.add(new FieldSchema("part0", Constants.STRING_TYPE_NAME, ""));
-    return fields;
-  }
-
-  @Override
-  protected List<FieldSchema> getTableColumns() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
-    fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
-    return fields;
-  }
-
-
-  public void testHCatPartitionedTable() throws Exception {
-
-    Map<String, String> partitionMap = new HashMap<String, String>();
-    partitionMap.put("part1", "p1value1");
-    partitionMap.put("part0", "p0value1");
-
-    runMRCreate(partitionMap, partitionColumns, writeRecords, 10,true);
-
-    partitionMap.clear();
-    partitionMap.put("PART1", "p1value2");
-    partitionMap.put("PART0", "p0value2");
-
-    runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-
-    //Test for duplicate publish
-    IOException exc = null;
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
+    @Override
+    protected List<FieldSchema> getPartitionKeys() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        //Defining partition names in unsorted order
+        fields.add(new FieldSchema("PaRT1", Constants.STRING_TYPE_NAME, ""));
+        fields.add(new FieldSchema("part0", Constants.STRING_TYPE_NAME, ""));
+        return fields;
     }
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_DUPLICATE_PARTITION, ((HCatException) exc).getErrorType());
-
-    //Test for publish with invalid partition key name
-    exc = null;
-    partitionMap.clear();
-    partitionMap.put("px1", "p1value2");
-    partitionMap.put("px0", "p0value2");
-
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
+    @Override
+    protected List<FieldSchema> getTableColumns() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
+        fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
+        return fields;
     }
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_MISSING_PARTITION_KEY, ((HCatException) exc).getErrorType());
 
-    //Test for publish with missing partition key values
-    exc = null;
-    partitionMap.clear();
-    partitionMap.put("px", "p1value2");
+    public void testHCatPartitionedTable() throws Exception {
 
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
-    }
+        Map<String, String> partitionMap = new HashMap<String, String>();
+        partitionMap.put("part1", "p1value1");
+        partitionMap.put("part0", "p0value1");
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_INVALID_PARTITION_VALUES, ((HCatException) exc).getErrorType());
+        runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);
+
+        partitionMap.clear();
+        partitionMap.put("PART1", "p1value2");
+        partitionMap.put("PART0", "p0value2");
+
+        runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+
+        //Test for duplicate publish
+        IOException exc = null;
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_DUPLICATE_PARTITION, ((HCatException) exc).getErrorType());
+
+        //Test for publish with invalid partition key name
+        exc = null;
+        partitionMap.clear();
+        partitionMap.put("px1", "p1value2");
+        partitionMap.put("px0", "p0value2");
+
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_MISSING_PARTITION_KEY, ((HCatException) exc).getErrorType());
+
+        //Test for publish with missing partition key values
+        exc = null;
+        partitionMap.clear();
+        partitionMap.put("px", "p1value2");
+
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_INVALID_PARTITION_VALUES, ((HCatException) exc).getErrorType());
 
 
-    //Test for null partition value map
-    exc = null;
-    try {
-      runMRCreate(null, partitionColumns, writeRecords, 20,false);
-    } catch(IOException e) {
-      exc = e;
-    }
+        //Test for null partition value map
+        exc = null;
+        try {
+            runMRCreate(null, partitionColumns, writeRecords, 20, false);
+        } catch (IOException e) {
+            exc = e;
+        }
 
-    assertTrue(exc == null);
+        assertTrue(exc == null);
 //    assertTrue(exc instanceof HCatException);
 //    assertEquals(ErrorType.ERROR_PUBLISHING_PARTITION, ((HCatException) exc).getErrorType());
-    // With dynamic partitioning, it is not an error that no partition key values were specified
+        // With dynamic partitioning, it is not an error that no partition key values were specified
 
-    //Read should get 10 + 20 rows
-    runMRRead(30);
+        //Read should get 10 + 20 rows
+        runMRRead(30);
 
-    //Read with partition filter
-    runMRRead(10, "part1 = \"p1value1\"");
-    runMRRead(20, "part1 = \"p1value2\"");
-    runMRRead(30, "part1 = \"p1value1\" or part1 = \"p1value2\"");
-    runMRRead(10, "part0 = \"p0value1\"");
-    runMRRead(20, "part0 = \"p0value2\"");
-    runMRRead(30, "part0 = \"p0value1\" or part0 = \"p0value2\"");
+        //Read with partition filter
+        runMRRead(10, "part1 = \"p1value1\"");
+        runMRRead(20, "part1 = \"p1value2\"");
+        runMRRead(30, "part1 = \"p1value1\" or part1 = \"p1value2\"");
+        runMRRead(10, "part0 = \"p0value1\"");
+        runMRRead(20, "part0 = \"p0value2\"");
+        runMRRead(30, "part0 = \"p0value1\" or part0 = \"p0value2\"");
 
-    tableSchemaTest();
-    columnOrderChangeTest();
-    hiveReadTest();
-  }
-
-
-  //test that new columns get added to the table schema
-  private void tableSchemaTest() throws Exception {
-
-    HCatSchema tableSchema = getTableSchema();
-
-    assertEquals(4, tableSchema.getFields().size());
-
-    //Update partition schema to have 3 fields
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
-
-    writeRecords = new ArrayList<HCatRecord>();
-
-    for(int i = 0;i < 20;i++) {
-      List<Object> objList = new ArrayList<Object>();
-
-      objList.add(i);
-      objList.add("strvalue" + i);
-      objList.add("str2value" + i);
-
-      writeRecords.add(new DefaultHCatRecord(objList));
+        tableSchemaTest();
+        columnOrderChangeTest();
+        hiveReadTest();
     }
 
-    Map<String, String> partitionMap = new HashMap<String, String>();
-    partitionMap.put("part1", "p1value5");
-    partitionMap.put("part0", "p0value5");
 
-    runMRCreate(partitionMap, partitionColumns, writeRecords, 10,true);
+    //test that new columns get added to the table schema
+    private void tableSchemaTest() throws Exception {
 
-    tableSchema = getTableSchema();
+        HCatSchema tableSchema = getTableSchema();
 
-    //assert that c3 has been added to the table schema
-    assertEquals(5, tableSchema.getFields().size());
-    assertEquals("c1", tableSchema.getFields().get(0).getName());
-    assertEquals("c2", tableSchema.getFields().get(1).getName());
-    assertEquals("c3", tableSchema.getFields().get(2).getName());
-    assertEquals("part1", tableSchema.getFields().get(3).getName());
-    assertEquals("part0", tableSchema.getFields().get(4).getName());
+        assertEquals(4, tableSchema.getFields().size());
 
-    //Test that changing column data type fails
-    partitionMap.clear();
-    partitionMap.put("part1", "p1value6");
-    partitionMap.put("part0", "p0value6");
+        //Update partition schema to have 3 fields
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
 
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.INT_TYPE_NAME, "")));
+        writeRecords = new ArrayList<HCatRecord>();
 
-    IOException exc = null;
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
+        for (int i = 0; i < 20; i++) {
+            List<Object> objList = new ArrayList<Object>();
+
+            objList.add(i);
+            objList.add("strvalue" + i);
+            objList.add("str2value" + i);
+
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
+
+        Map<String, String> partitionMap = new HashMap<String, String>();
+        partitionMap.put("part1", "p1value5");
+        partitionMap.put("part0", "p0value5");
+
+        runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);
+
+        tableSchema = getTableSchema();
+
+        //assert that c3 has been added to the table schema
+        assertEquals(5, tableSchema.getFields().size());
+        assertEquals("c1", tableSchema.getFields().get(0).getName());
+        assertEquals("c2", tableSchema.getFields().get(1).getName());
+        assertEquals("c3", tableSchema.getFields().get(2).getName());
+        assertEquals("part1", tableSchema.getFields().get(3).getName());
+        assertEquals("part0", tableSchema.getFields().get(4).getName());
+
+        //Test that changing column data type fails
+        partitionMap.clear();
+        partitionMap.put("part1", "p1value6");
+        partitionMap.put("part0", "p0value6");
+
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.INT_TYPE_NAME, "")));
+
+        IOException exc = null;
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_SCHEMA_TYPE_MISMATCH, ((HCatException) exc).getErrorType());
+
+        //Test that partition key is not allowed in data
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("part1", Constants.STRING_TYPE_NAME, "")));
+
+        List<HCatRecord> recordsContainingPartitionCols = new ArrayList<HCatRecord>(20);
+        for (int i = 0; i < 20; i++) {
+            List<Object> objList = new ArrayList<Object>();
+
+            objList.add(i);
+            objList.add("c2value" + i);
+            objList.add("c3value" + i);
+            objList.add("p1value6");
+
+            recordsContainingPartitionCols.add(new DefaultHCatRecord(objList));
+        }
+
+        exc = null;
+        try {
+            runMRCreate(partitionMap, partitionColumns, recordsContainingPartitionCols, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        List<HCatRecord> records = runMRRead(20, "part1 = \"p1value6\"");
+        assertEquals(20, records.size());
+        records = runMRRead(20, "part0 = \"p0value6\"");
+        assertEquals(20, records.size());
+        Integer i = 0;
+        for (HCatRecord rec : records) {
+            assertEquals(5, rec.size());
+            assertTrue(rec.get(0).equals(i));
+            assertTrue(rec.get(1).equals("c2value" + i));
+            assertTrue(rec.get(2).equals("c3value" + i));
+            assertTrue(rec.get(3).equals("p1value6"));
+            assertTrue(rec.get(4).equals("p0value6"));
+            i++;
+        }
     }
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_SCHEMA_TYPE_MISMATCH, ((HCatException) exc).getErrorType());
+    //check behavior when changing the order of columns
+    private void columnOrderChangeTest() throws Exception {
 
-    //Test that partition key is not allowed in data
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("part1", Constants.STRING_TYPE_NAME, "")));
+        HCatSchema tableSchema = getTableSchema();
 
-    List<HCatRecord> recordsContainingPartitionCols = new ArrayList<HCatRecord>(20);
-    for(int i = 0;i < 20;i++) {
-      List<Object> objList = new ArrayList<Object>();
+        assertEquals(5, tableSchema.getFields().size());
 
-      objList.add(i);
-      objList.add("c2value" + i);
-      objList.add("c3value" + i);
-      objList.add("p1value6");
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
 
-      recordsContainingPartitionCols.add(new DefaultHCatRecord(objList));
+
+        writeRecords = new ArrayList<HCatRecord>();
+
+        for (int i = 0; i < 10; i++) {
+            List<Object> objList = new ArrayList<Object>();
+
+            objList.add(i);
+            objList.add("co strvalue" + i);
+            objList.add("co str2value" + i);
+
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
+
+        Map<String, String> partitionMap = new HashMap<String, String>();
+        partitionMap.put("part1", "p1value8");
+        partitionMap.put("part0", "p0value8");
+
+        Exception exc = null;
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_SCHEMA_COLUMN_MISMATCH, ((HCatException) exc).getErrorType());
+
+
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
+
+        writeRecords = new ArrayList<HCatRecord>();
+
+        for (int i = 0; i < 10; i++) {
+            List<Object> objList = new ArrayList<Object>();
+
+            objList.add(i);
+            objList.add("co strvalue" + i);
+
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
+
+        runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);
+
+        //Read should get 10 + 20 + 10 + 10 + 20 rows
+        runMRRead(70);
     }
 
-    exc = null;
-    try {
-      runMRCreate(partitionMap, partitionColumns, recordsContainingPartitionCols, 20,true);
-    } catch(IOException e) {
-      exc = e;
+    //Test that data inserted through hcatoutputformat is readable from hive
+    private void hiveReadTest() throws Exception {
+
+        String query = "select * from " + tableName;
+        int retCode = driver.run(query).getResponseCode();
+
+        if (retCode != 0) {
+            throw new Exception("Error " + retCode + " running query " + query);
+        }
+
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        assertEquals(70, res.size());
     }
-
-    List<HCatRecord> records= runMRRead(20,"part1 = \"p1value6\"");
-    assertEquals(20, records.size());
-    records= runMRRead(20,"part0 = \"p0value6\"");
-    assertEquals(20, records.size());
-    Integer i =0;
-    for(HCatRecord rec : records){
-      assertEquals(5, rec.size());
-      assertTrue(rec.get(0).equals(i));
-      assertTrue(rec.get(1).equals("c2value"+i));
-      assertTrue(rec.get(2).equals("c3value"+i));
-      assertTrue(rec.get(3).equals("p1value6"));
-      assertTrue(rec.get(4).equals("p0value6"));
-      i++;
-    }
-  }
-
-  //check behavior when changing the order of columns
-  private void columnOrderChangeTest() throws Exception {
-
-    HCatSchema tableSchema = getTableSchema();
-
-    assertEquals(5, tableSchema.getFields().size());
-
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
-
-
-    writeRecords = new ArrayList<HCatRecord>();
-
-    for(int i = 0;i < 10;i++) {
-      List<Object> objList = new ArrayList<Object>();
-
-      objList.add(i);
-      objList.add("co strvalue" + i);
-      objList.add("co str2value" + i);
-
-      writeRecords.add(new DefaultHCatRecord(objList));
-    }
-
-    Map<String, String> partitionMap = new HashMap<String, String>();
-    partitionMap.put("part1", "p1value8");
-    partitionMap.put("part0", "p0value8");
-
-    Exception exc = null;
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 10,true);
-    } catch(IOException e) {
-      exc = e;
-    }
-
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_SCHEMA_COLUMN_MISMATCH, ((HCatException) exc).getErrorType());
-
-
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
-
-    writeRecords = new ArrayList<HCatRecord>();
-
-    for(int i = 0;i < 10;i++) {
-      List<Object> objList = new ArrayList<Object>();
-
-      objList.add(i);
-      objList.add("co strvalue" + i);
-
-      writeRecords.add(new DefaultHCatRecord(objList));
-    }
-
-    runMRCreate(partitionMap, partitionColumns, writeRecords, 10,true);
-
-    //Read should get 10 + 20 + 10 + 10 + 20 rows
-    runMRRead(70);
-  }
-
-  //Test that data inserted through hcatoutputformat is readable from hive
-  private void hiveReadTest() throws Exception {
-
-    String query = "select * from " + tableName;
-    int retCode = driver.run(query).getResponseCode();
-
-    if( retCode != 0 ) {
-      throw new Exception("Error " + retCode + " running query " + query);
-    }
-
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-    assertEquals(70, res.size());
-  }
 }
diff --git a/src/test/org/apache/hcatalog/mapreduce/TestMultiOutputFormat.java b/src/test/org/apache/hcatalog/mapreduce/TestMultiOutputFormat.java
index 155f7ad..35ee01c 100644
--- a/src/test/org/apache/hcatalog/mapreduce/TestMultiOutputFormat.java
+++ b/src/test/org/apache/hcatalog/mapreduce/TestMultiOutputFormat.java
@@ -74,7 +74,7 @@
         // LocalJobRunner does not work with mapreduce OutputCommitter. So need
         // to use MiniMRCluster. MAPREDUCE-2350
         mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
-                new JobConf(conf));
+            new JobConf(conf));
         mrConf = mrCluster.createJobConf();
     }
 
@@ -111,7 +111,7 @@
         JobConfigurer configurer = MultiOutputFormat.createConfigurer(job);
         configurer.addOutputFormat("out1", TextOutputFormat.class, IntWritable.class, Text.class);
         configurer.addOutputFormat("out2", SequenceFileOutputFormat.class, Text.class,
-                IntWritable.class);
+            IntWritable.class);
         Path outDir = new Path(workDir.getPath(), job.getJobName());
         FileOutputFormat.setOutputPath(configurer.getJob("out1"), new Path(outDir, "out1"));
         FileOutputFormat.setOutputPath(configurer.getJob("out2"), new Path(outDir, "out2"));
@@ -124,19 +124,19 @@
         DistributedCache.addFileToClassPath(new Path(inputFile), job.getConfiguration(), fs);
         String dummyFile = createInputFile("dummy file");
         DistributedCache.addFileToClassPath(new Path(dummyFile), configurer.getJob("out1")
-                .getConfiguration(), fs);
+            .getConfiguration(), fs);
         // duplicate of the value. Merging should remove duplicates
         DistributedCache.addFileToClassPath(new Path(inputFile), configurer.getJob("out2")
-                .getConfiguration(), fs);
+            .getConfiguration(), fs);
 
         configurer.configure();
 
         // Verify if the configs are merged
         Path[] fileClassPaths = DistributedCache.getFileClassPaths(job.getConfiguration());
-        Assert.assertArrayEquals(new Path[] {new Path(inputFile), new Path(dummyFile)},
-                fileClassPaths);
-        URI[] expectedCacheFiles = new URI[] {new Path(inputFile).makeQualified(fs).toUri(),
-                new Path(dummyFile).makeQualified(fs).toUri()};
+        Assert.assertArrayEquals(new Path[]{new Path(inputFile), new Path(dummyFile)},
+            fileClassPaths);
+        URI[] expectedCacheFiles = new URI[]{new Path(inputFile).makeQualified(fs).toUri(),
+            new Path(dummyFile).makeQualified(fs).toUri()};
         URI[] cacheFiles = DistributedCache.getCacheFiles(job.getConfiguration());
         Assert.assertArrayEquals(expectedCacheFiles, cacheFiles);
 
@@ -180,9 +180,9 @@
 
         configurer.addOutputFormat("out1", TextOutputFormat.class, IntWritable.class, Text.class);
         configurer.addOutputFormat("out2", SequenceFileOutputFormat.class, Text.class,
-                IntWritable.class);
+            IntWritable.class);
         configurer.addOutputFormat("out3", NullOutputFormat.class, Text.class,
-                IntWritable.class);
+            IntWritable.class);
         Path outDir = new Path(workDir.getPath(), job.getJobName());
         FileOutputFormat.setOutputPath(configurer.getJob("out1"), new Path(outDir, "out1"));
         FileOutputFormat.setOutputPath(configurer.getJob("out2"), new Path(outDir, "out2"));
@@ -237,14 +237,14 @@
     }
 
     private static class MultiOutWordIndexMapper extends
-            Mapper<LongWritable, Text, Writable, Writable> {
+        Mapper<LongWritable, Text, Writable, Writable> {
 
         private IntWritable index = new IntWritable(1);
         private Text word = new Text();
 
         @Override
         protected void map(LongWritable key, Text value, Context context)
-                throws IOException, InterruptedException {
+            throws IOException, InterruptedException {
             StringTokenizer itr = new StringTokenizer(value.toString());
             while (itr.hasMoreTokens()) {
                 word.set(itr.nextToken());
@@ -256,14 +256,14 @@
     }
 
     private static class WordCountMapper extends
-            Mapper<LongWritable, Text, Text, IntWritable> {
+        Mapper<LongWritable, Text, Text, IntWritable> {
 
         private final static IntWritable one = new IntWritable(1);
         private Text word = new Text();
 
         @Override
         protected void map(LongWritable key, Text value, Context context)
-                throws IOException, InterruptedException {
+            throws IOException, InterruptedException {
             StringTokenizer itr = new StringTokenizer(value.toString());
             while (itr.hasMoreTokens()) {
                 word.set(itr.nextToken());
@@ -273,13 +273,13 @@
     }
 
     private static class MultiOutWordCountReducer extends
-            Reducer<Text, IntWritable, Writable, Writable> {
+        Reducer<Text, IntWritable, Writable, Writable> {
 
         private IntWritable count = new IntWritable();
 
         @Override
         protected void reduce(Text word, Iterable<IntWritable> values, Context context)
-                throws IOException, InterruptedException {
+            throws IOException, InterruptedException {
             int sum = 0;
             for (IntWritable val : values) {
                 sum += val.get();
@@ -292,23 +292,34 @@
     }
 
     private static class NullOutputFormat<K, V> extends
-            org.apache.hadoop.mapreduce.lib.output.NullOutputFormat<K, V> {
+        org.apache.hadoop.mapreduce.lib.output.NullOutputFormat<K, V> {
 
         @Override
         public OutputCommitter getOutputCommitter(TaskAttemptContext context) {
             return new OutputCommitter() {
-                public void abortTask(TaskAttemptContext taskContext) { }
-                public void cleanupJob(JobContext jobContext) { }
-                public void commitJob(JobContext jobContext) { }
+                public void abortTask(TaskAttemptContext taskContext) {
+                }
+
+                public void cleanupJob(JobContext jobContext) {
+                }
+
+                public void commitJob(JobContext jobContext) {
+                }
+
                 public void commitTask(TaskAttemptContext taskContext) {
                     Assert.fail("needsTaskCommit is false but commitTask was called");
                 }
+
                 public boolean needsTaskCommit(TaskAttemptContext taskContext) {
-                  return false;
+                    return false;
                 }
-                public void setupJob(JobContext jobContext) { }
-                public void setupTask(TaskAttemptContext taskContext) { }
-              };
+
+                public void setupJob(JobContext jobContext) {
+                }
+
+                public void setupTask(TaskAttemptContext taskContext) {
+                }
+            };
         }
     }
 
diff --git a/src/test/org/apache/hcatalog/mapreduce/TestPassProperties.java b/src/test/org/apache/hcatalog/mapreduce/TestPassProperties.java
index c062105..c890574 100644
--- a/src/test/org/apache/hcatalog/mapreduce/TestPassProperties.java
+++ b/src/test/org/apache/hcatalog/mapreduce/TestPassProperties.java
@@ -46,10 +46,10 @@
 import org.junit.Test;
 
 public class TestPassProperties {
-  private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
-      "/build/test/data/" + TestSequenceFileReadWrite.class.getCanonicalName();
-  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
-  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
+    private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+            "/build/test/data/" + TestSequenceFileReadWrite.class.getCanonicalName();
+    private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+    private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
 
     private static Driver driver;
     private static PigServer server;
@@ -79,7 +79,7 @@
     }
 
     @Test
-    public void testSequenceTableWriteReadMR() throws Exception{
+    public void testSequenceTableWriteReadMR() throws Exception {
         Initialize();
         String createTable = "CREATE TABLE bad_props_table(a0 int, a1 String, a2 String) STORED AS SEQUENCEFILE";
         driver.run("drop table bad_props_table");
@@ -88,56 +88,55 @@
 
         boolean caughtException = false;
         try {
-          Configuration conf = new Configuration();
-          conf.set("hive.metastore.uris", "thrift://no.such.machine:10888");
-          conf.set("hive.metastore.local", "false");
-          Job job = new Job(conf, "Write-hcat-seq-table");
-          job.setJarByClass(TestSequenceFileReadWrite.class);
-  
-          job.setMapperClass(Map.class);
-          job.setOutputKeyClass(NullWritable.class);
-          job.setOutputValueClass(DefaultHCatRecord.class);
-          job.setInputFormatClass(TextInputFormat.class);
-          TextInputFormat.setInputPaths(job, INPUT_FILE_NAME);
-  
-          HCatOutputFormat.setOutput(job, OutputJobInfo.create(
-                  MetaStoreUtils.DEFAULT_DATABASE_NAME, "bad_props_table", null));
-          job.setOutputFormatClass(HCatOutputFormat.class);
-          HCatOutputFormat.setSchema(job, getSchema());
-          job.setNumReduceTasks(0);
-          assertTrue(job.waitForCompletion(true));
-          new FileOutputCommitterContainer(job, null).cleanupJob(job);
+            Configuration conf = new Configuration();
+            conf.set("hive.metastore.uris", "thrift://no.such.machine:10888");
+            conf.set("hive.metastore.local", "false");
+            Job job = new Job(conf, "Write-hcat-seq-table");
+            job.setJarByClass(TestSequenceFileReadWrite.class);
+
+            job.setMapperClass(Map.class);
+            job.setOutputKeyClass(NullWritable.class);
+            job.setOutputValueClass(DefaultHCatRecord.class);
+            job.setInputFormatClass(TextInputFormat.class);
+            TextInputFormat.setInputPaths(job, INPUT_FILE_NAME);
+
+            HCatOutputFormat.setOutput(job, OutputJobInfo.create(
+                    MetaStoreUtils.DEFAULT_DATABASE_NAME, "bad_props_table", null));
+            job.setOutputFormatClass(HCatOutputFormat.class);
+            HCatOutputFormat.setSchema(job, getSchema());
+            job.setNumReduceTasks(0);
+            assertTrue(job.waitForCompletion(true));
+            new FileOutputCommitterContainer(job, null).cleanupJob(job);
         } catch (Exception e) {
             caughtException = true;
             assertTrue(e.getMessage().contains(
-              "Could not connect to meta store using any of the URIs provided"));
+                    "Could not connect to meta store using any of the URIs provided"));
         }
         assertTrue(caughtException);
     }
-    
-    public static class Map extends Mapper<LongWritable, Text, NullWritable, DefaultHCatRecord>{
 
-      public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
-          String[] cols = value.toString().split(",");
-          DefaultHCatRecord record = new DefaultHCatRecord(3);
-          record.set(0,Integer.parseInt(cols[0]));
-          record.set(1,cols[1]);
-          record.set(2,cols[2]);
-          context.write(NullWritable.get(), record);
-      }
+    public static class Map extends Mapper<LongWritable, Text, NullWritable, DefaultHCatRecord> {
+
+        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
+            String[] cols = value.toString().split(",");
+            DefaultHCatRecord record = new DefaultHCatRecord(3);
+            record.set(0, Integer.parseInt(cols[0]));
+            record.set(1, cols[1]);
+            record.set(2, cols[2]);
+            context.write(NullWritable.get(), record);
+        }
     }
 
-  private HCatSchema getSchema() throws HCatException {
-      HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
-      schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT,
-              ""));
-      schema.append(new HCatFieldSchema("a1",
-              HCatFieldSchema.Type.STRING, ""));
-      schema.append(new HCatFieldSchema("a2",
-              HCatFieldSchema.Type.STRING, ""));
-      return schema;
-  }
-
+    private HCatSchema getSchema() throws HCatException {
+        HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
+        schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT,
+                ""));
+        schema.append(new HCatFieldSchema("a1",
+                HCatFieldSchema.Type.STRING, ""));
+        schema.append(new HCatFieldSchema("a2",
+                HCatFieldSchema.Type.STRING, ""));
+        return schema;
+    }
 
 
 }
diff --git a/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java b/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java
index 38f4c66..d89e885 100644
--- a/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java
+++ b/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java
@@ -53,10 +53,10 @@
 import org.junit.Test;
 
 public class TestSequenceFileReadWrite extends TestCase {
-  private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
-      "/build/test/data/" + TestSequenceFileReadWrite.class.getCanonicalName();
-  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
-  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
+    private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+            "/build/test/data/" + TestSequenceFileReadWrite.class.getCanonicalName();
+    private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+    private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
 
     private static Driver driver;
     private static PigServer server;
@@ -86,7 +86,7 @@
     }
 
     @Test
-   public void testSequenceTableWriteRead() throws Exception{
+    public void testSequenceTableWriteRead() throws Exception {
         Initialize();
         String createTable = "CREATE TABLE demo_table(a0 int, a1 String, a2 String) STORED AS SEQUENCEFILE";
         driver.run("drop table demo_table");
@@ -112,10 +112,10 @@
             numTuplesRead++;
         }
         assertEquals(input.length, numTuplesRead);
-   }
+    }
 
     @Test
-    public void testTextTableWriteRead() throws Exception{
+    public void testTextTableWriteRead() throws Exception {
         Initialize();
         String createTable = "CREATE TABLE demo_table_1(a0 int, a1 String, a2 String) STORED AS TEXTFILE";
         driver.run("drop table demo_table_1");
@@ -144,7 +144,7 @@
     }
 
     @Test
-    public void testSequenceTableWriteReadMR() throws Exception{
+    public void testSequenceTableWriteReadMR() throws Exception {
         Initialize();
         String createTable = "CREATE TABLE demo_table_2(a0 int, a1 String, a2 String) STORED AS SEQUENCEFILE";
         driver.run("drop table demo_table_2");
@@ -238,27 +238,27 @@
     }
 
 
-  public static class Map extends Mapper<LongWritable, Text, NullWritable, DefaultHCatRecord>{
+    public static class Map extends Mapper<LongWritable, Text, NullWritable, DefaultHCatRecord> {
 
-      public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
-          String[] cols = value.toString().split(",");
-          DefaultHCatRecord record = new DefaultHCatRecord(3);
-          record.set(0,Integer.parseInt(cols[0]));
-          record.set(1,cols[1]);
-          record.set(2,cols[2]);
-          context.write(NullWritable.get(), record);
-      }
+        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
+            String[] cols = value.toString().split(",");
+            DefaultHCatRecord record = new DefaultHCatRecord(3);
+            record.set(0, Integer.parseInt(cols[0]));
+            record.set(1, cols[1]);
+            record.set(2, cols[2]);
+            context.write(NullWritable.get(), record);
+        }
     }
 
-  private HCatSchema getSchema() throws HCatException {
-      HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
-      schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT,
-              ""));
-      schema.append(new HCatFieldSchema("a1",
-              HCatFieldSchema.Type.STRING, ""));
-      schema.append(new HCatFieldSchema("a2",
-              HCatFieldSchema.Type.STRING, ""));
-      return schema;
-  }
+    private HCatSchema getSchema() throws HCatException {
+        HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
+        schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT,
+                ""));
+        schema.append(new HCatFieldSchema("a1",
+                HCatFieldSchema.Type.STRING, ""));
+        schema.append(new HCatFieldSchema("a2",
+                HCatFieldSchema.Type.STRING, ""));
+        return schema;
+    }
 
 }
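
For reference, the write path exercised by TestSequenceFileReadWrite can be condensed into a standalone sketch: a map-only job that parses comma-separated text into DefaultHCatRecord rows and hands them to HCatOutputFormat. This is a minimal illustration, not part of the commit; the class name WriteDemo, the input path, and the target table name are assumptions, while the Job, OutputJobInfo, HCatOutputFormat and HCatSchema calls mirror the ones visible in the test above (import locations assumed from the trunk layout of the time).

    import java.io.IOException;
    import java.util.ArrayList;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
    import org.apache.hcatalog.common.HCatException;
    import org.apache.hcatalog.data.DefaultHCatRecord;
    import org.apache.hcatalog.data.schema.HCatFieldSchema;
    import org.apache.hcatalog.data.schema.HCatSchema;
    import org.apache.hcatalog.mapreduce.HCatOutputFormat;
    import org.apache.hcatalog.mapreduce.OutputJobInfo;

    // Illustrative driver (hypothetical): map a comma-separated text file into an
    // existing HCatalog table with no reducers, same shape as the tests above.
    public class WriteDemo {

        public static class Map extends Mapper<LongWritable, Text, NullWritable, DefaultHCatRecord> {
            @Override
            public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
                String[] cols = value.toString().split(",");
                DefaultHCatRecord record = new DefaultHCatRecord(3);
                record.set(0, Integer.parseInt(cols[0]));
                record.set(1, cols[1]);
                record.set(2, cols[2]);
                context.write(NullWritable.get(), record);
            }
        }

        private static HCatSchema getSchema() throws HCatException {
            HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
            schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT, ""));
            schema.append(new HCatFieldSchema("a1", HCatFieldSchema.Type.STRING, ""));
            schema.append(new HCatFieldSchema("a2", HCatFieldSchema.Type.STRING, ""));
            return schema;
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Job job = new Job(conf, "write-hcat-table");           // deprecated ctor, as in the test
            job.setJarByClass(WriteDemo.class);
            job.setMapperClass(Map.class);
            job.setOutputKeyClass(NullWritable.class);
            job.setOutputValueClass(DefaultHCatRecord.class);
            job.setInputFormatClass(TextInputFormat.class);
            TextInputFormat.setInputPaths(job, "/tmp/input.data"); // hypothetical input path
            HCatOutputFormat.setOutput(job,
                OutputJobInfo.create("default", "demo_table", null)); // table assumed to exist
            job.setOutputFormatClass(HCatOutputFormat.class);
            HCatOutputFormat.setSchema(job, getSchema());
            job.setNumReduceTasks(0);
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }
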
diff --git a/src/test/org/apache/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java b/src/test/org/apache/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java
index 11ea1aa..80deeb7 100644
--- a/src/test/org/apache/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java
+++ b/src/test/org/apache/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java
@@ -46,11 +46,11 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-  /**
-   * TestRCFile.
-   *
-   */
-  public class TestRCFileMapReduceInputFormat extends TestCase {
+/**
+ * TestRCFile.
+ *
+ */
+public class TestRCFileMapReduceInputFormat extends TestCase {
 
     private static final Logger LOG = LoggerFactory.getLogger(TestRCFileMapReduceInputFormat.class);
 
@@ -65,18 +65,18 @@
     private static Properties tbl;
 
     static {
-      try {
-        fs = FileSystem.getLocal(conf);
-        Path dir = new Path(System.getProperty("test.data.dir", ".") + "/mapred");
-        file = new Path(dir, "test_rcfile");
-        fs.delete(dir, true);
-        // the SerDe part is from TestLazySimpleSerDe
-        serDe = new ColumnarSerDe();
-        // Create the SerDe
-        tbl = createProperties();
-        serDe.initialize(conf, tbl);
-      } catch (Exception e) {
-      }
+        try {
+            fs = FileSystem.getLocal(conf);
+            Path dir = new Path(System.getProperty("test.data.dir", ".") + "/mapred");
+            file = new Path(dir, "test_rcfile");
+            fs.delete(dir, true);
+            // the SerDe part is from TestLazySimpleSerDe
+            serDe = new ColumnarSerDe();
+            // Create the SerDe
+            tbl = createProperties();
+            serDe.initialize(conf, tbl);
+        } catch (Exception e) {
+        }
     }
 
     private static BytesRefArrayWritable patialS = new BytesRefArrayWritable();
@@ -84,164 +84,164 @@
     private static byte[][] bytesArray = null;
 
     private static BytesRefArrayWritable s = null;
+
     static {
-      try {
-        bytesArray = new byte[][] {"123".getBytes("UTF-8"),
-            "456".getBytes("UTF-8"), "789".getBytes("UTF-8"),
-            "1000".getBytes("UTF-8"), "5.3".getBytes("UTF-8"),
-            "hive and hadoop".getBytes("UTF-8"), new byte[0],
-            "NULL".getBytes("UTF-8")};
-        s = new BytesRefArrayWritable(bytesArray.length);
-        s.set(0, new BytesRefWritable("123".getBytes("UTF-8")));
-        s.set(1, new BytesRefWritable("456".getBytes("UTF-8")));
-        s.set(2, new BytesRefWritable("789".getBytes("UTF-8")));
-        s.set(3, new BytesRefWritable("1000".getBytes("UTF-8")));
-        s.set(4, new BytesRefWritable("5.3".getBytes("UTF-8")));
-        s.set(5, new BytesRefWritable("hive and hadoop".getBytes("UTF-8")));
-        s.set(6, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        s.set(7, new BytesRefWritable("NULL".getBytes("UTF-8")));
+        try {
+            bytesArray = new byte[][]{"123".getBytes("UTF-8"),
+                "456".getBytes("UTF-8"), "789".getBytes("UTF-8"),
+                "1000".getBytes("UTF-8"), "5.3".getBytes("UTF-8"),
+                "hive and hadoop".getBytes("UTF-8"), new byte[0],
+                "NULL".getBytes("UTF-8")};
+            s = new BytesRefArrayWritable(bytesArray.length);
+            s.set(0, new BytesRefWritable("123".getBytes("UTF-8")));
+            s.set(1, new BytesRefWritable("456".getBytes("UTF-8")));
+            s.set(2, new BytesRefWritable("789".getBytes("UTF-8")));
+            s.set(3, new BytesRefWritable("1000".getBytes("UTF-8")));
+            s.set(4, new BytesRefWritable("5.3".getBytes("UTF-8")));
+            s.set(5, new BytesRefWritable("hive and hadoop".getBytes("UTF-8")));
+            s.set(6, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            s.set(7, new BytesRefWritable("NULL".getBytes("UTF-8")));
 
-        // partial test init
-        patialS.set(0, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        patialS.set(1, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        patialS.set(2, new BytesRefWritable("789".getBytes("UTF-8")));
-        patialS.set(3, new BytesRefWritable("1000".getBytes("UTF-8")));
-        patialS.set(4, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        patialS.set(5, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        patialS.set(6, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        patialS.set(7, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            // partial test init
+            patialS.set(0, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            patialS.set(1, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            patialS.set(2, new BytesRefWritable("789".getBytes("UTF-8")));
+            patialS.set(3, new BytesRefWritable("1000".getBytes("UTF-8")));
+            patialS.set(4, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            patialS.set(5, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            patialS.set(6, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            patialS.set(7, new BytesRefWritable("NULL".getBytes("UTF-8")));
 
-      } catch (UnsupportedEncodingException e) {
-      }
+        } catch (UnsupportedEncodingException e) {
+        }
     }
 
 
     /** For debugging and testing. */
     public static void main(String[] args) throws Exception {
-      int count = 10000;
-      boolean create = true;
+        int count = 10000;
+        boolean create = true;
 
-      String usage = "Usage: RCFile " + "[-count N]" + " file";
-      if (args.length == 0) {
-        LOG.error(usage);
-        System.exit(-1);
-      }
-
-      try {
-        for (int i = 0; i < args.length; ++i) { // parse command line
-          if (args[i] == null) {
-            continue;
-          } else if (args[i].equals("-count")) {
-            count = Integer.parseInt(args[++i]);
-          } else {
-            // file is required parameter
-            file = new Path(args[i]);
-          }
+        String usage = "Usage: RCFile " + "[-count N]" + " file";
+        if (args.length == 0) {
+            LOG.error(usage);
+            System.exit(-1);
         }
 
-        if (file == null) {
-          LOG.error(usage);
-          System.exit(-1);
+        try {
+            for (int i = 0; i < args.length; ++i) { // parse command line
+                if (args[i] == null) {
+                    continue;
+                } else if (args[i].equals("-count")) {
+                    count = Integer.parseInt(args[++i]);
+                } else {
+                    // file is required parameter
+                    file = new Path(args[i]);
+                }
+            }
+
+            if (file == null) {
+                LOG.error(usage);
+                System.exit(-1);
+            }
+
+            LOG.info("count = {}", count);
+            LOG.info("create = {}", create);
+            LOG.info("file = {}", file);
+
+            // test.performanceTest();
+            LOG.info("Finished.");
+        } finally {
+            fs.close();
         }
-
-        LOG.info("count = {}", count);
-        LOG.info("create = {}", create);
-        LOG.info("file = {}" ,file);
-
-           // test.performanceTest();
-        LOG.info("Finished.");
-      } finally {
-        fs.close();
-      }
     }
 
     private static Properties createProperties() {
-      Properties tbl = new Properties();
+        Properties tbl = new Properties();
 
-      // Set the configuration parameters
-      tbl.setProperty(Constants.SERIALIZATION_FORMAT, "9");
-      tbl.setProperty("columns",
-          "abyte,ashort,aint,along,adouble,astring,anullint,anullstring");
-      tbl.setProperty("columns.types",
-          "tinyint:smallint:int:bigint:double:string:int:string");
-      tbl.setProperty(Constants.SERIALIZATION_NULL_FORMAT, "NULL");
-      return tbl;
+        // Set the configuration parameters
+        tbl.setProperty(Constants.SERIALIZATION_FORMAT, "9");
+        tbl.setProperty("columns",
+            "abyte,ashort,aint,along,adouble,astring,anullint,anullstring");
+        tbl.setProperty("columns.types",
+            "tinyint:smallint:int:bigint:double:string:int:string");
+        tbl.setProperty(Constants.SERIALIZATION_NULL_FORMAT, "NULL");
+        return tbl;
     }
 
 
-
     public void testSynAndSplit() throws IOException, InterruptedException {
-      splitBeforeSync();
-      splitRightBeforeSync();
-      splitInMiddleOfSync();
-      splitRightAfterSync();
-      splitAfterSync();
+        splitBeforeSync();
+        splitRightBeforeSync();
+        splitInMiddleOfSync();
+        splitRightAfterSync();
+        splitAfterSync();
     }
 
-    private void splitBeforeSync() throws IOException,InterruptedException {
-      writeThenReadByRecordReader(600, 1000, 2, 17684, null);
+    private void splitBeforeSync() throws IOException, InterruptedException {
+        writeThenReadByRecordReader(600, 1000, 2, 17684, null);
     }
 
-    private void splitRightBeforeSync() throws IOException ,InterruptedException{
-      writeThenReadByRecordReader(500, 1000, 2, 17750, null);
+    private void splitRightBeforeSync() throws IOException, InterruptedException {
+        writeThenReadByRecordReader(500, 1000, 2, 17750, null);
     }
 
-    private void splitInMiddleOfSync() throws IOException,InterruptedException {
-      writeThenReadByRecordReader(500, 1000, 2, 17760, null);
+    private void splitInMiddleOfSync() throws IOException, InterruptedException {
+        writeThenReadByRecordReader(500, 1000, 2, 17760, null);
 
     }
 
     private void splitRightAfterSync() throws IOException, InterruptedException {
-      writeThenReadByRecordReader(500, 1000, 2, 17770, null);
+        writeThenReadByRecordReader(500, 1000, 2, 17770, null);
     }
 
-    private void splitAfterSync() throws IOException ,InterruptedException{
-      writeThenReadByRecordReader(500, 1000, 2, 19950, null);
+    private void splitAfterSync() throws IOException, InterruptedException {
+        writeThenReadByRecordReader(500, 1000, 2, 19950, null);
     }
 
     private void writeThenReadByRecordReader(int intervalRecordCount,
-        int writeCount, int splitNumber, long maxSplitSize, CompressionCodec codec)
+                                             int writeCount, int splitNumber, long maxSplitSize, CompressionCodec codec)
         throws IOException, InterruptedException {
-      Path testDir = new Path(System.getProperty("test.data.dir", ".")
-          + "/mapred/testsmallfirstsplit");
-      Path testFile = new Path(testDir, "test_rcfile");
-      fs.delete(testFile, true);
-      Configuration cloneConf = new Configuration(conf);
-      RCFileOutputFormat.setColumnNumber(cloneConf, bytesArray.length);
-      cloneConf.setInt(RCFile.RECORD_INTERVAL_CONF_STR, intervalRecordCount);
+        Path testDir = new Path(System.getProperty("test.data.dir", ".")
+            + "/mapred/testsmallfirstsplit");
+        Path testFile = new Path(testDir, "test_rcfile");
+        fs.delete(testFile, true);
+        Configuration cloneConf = new Configuration(conf);
+        RCFileOutputFormat.setColumnNumber(cloneConf, bytesArray.length);
+        cloneConf.setInt(RCFile.RECORD_INTERVAL_CONF_STR, intervalRecordCount);
 
-      RCFile.Writer writer = new RCFile.Writer(fs, cloneConf, testFile, null, codec);
+        RCFile.Writer writer = new RCFile.Writer(fs, cloneConf, testFile, null, codec);
 
-      BytesRefArrayWritable bytes = new BytesRefArrayWritable(bytesArray.length);
-      for (int i = 0; i < bytesArray.length; i++) {
-        BytesRefWritable cu = null;
-        cu = new BytesRefWritable(bytesArray[i], 0, bytesArray[i].length);
-        bytes.set(i, cu);
-      }
-      for (int i = 0; i < writeCount; i++) {
-        writer.append(bytes);
-      }
-      writer.close();
-
-      RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable> inputFormat = new RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable>();
-      Configuration jonconf = new Configuration(cloneConf);
-      jonconf.set("mapred.input.dir", testDir.toString());
-      JobContext context = new Job(jonconf);
-      context.getConfiguration().setLong("mapred.max.split.size",maxSplitSize);
-      List<InputSplit> splits = inputFormat.getSplits(context);
-      assertEquals("splits length should be " + splitNumber, splits.size(), splitNumber);
-      int readCount = 0;
-      for (int i = 0; i < splits.size(); i++) {
-        TaskAttemptContext tac = HCatHadoopShims.Instance.get().createTaskAttemptContext(jonconf, new TaskAttemptID());
-        RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
-        rr.initialize(splits.get(i), tac);
-        while (rr.nextKeyValue()) {
-          readCount++;
+        BytesRefArrayWritable bytes = new BytesRefArrayWritable(bytesArray.length);
+        for (int i = 0; i < bytesArray.length; i++) {
+            BytesRefWritable cu = null;
+            cu = new BytesRefWritable(bytesArray[i], 0, bytesArray[i].length);
+            bytes.set(i, cu);
         }
-      }
-      assertEquals("readCount should be equal to writeCount", readCount, writeCount);
+        for (int i = 0; i < writeCount; i++) {
+            writer.append(bytes);
+        }
+        writer.close();
+
+        RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable> inputFormat = new RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable>();
+        Configuration jonconf = new Configuration(cloneConf);
+        jonconf.set("mapred.input.dir", testDir.toString());
+        JobContext context = new Job(jonconf);
+        context.getConfiguration().setLong("mapred.max.split.size", maxSplitSize);
+        List<InputSplit> splits = inputFormat.getSplits(context);
+        assertEquals("splits length should be " + splitNumber, splits.size(), splitNumber);
+        int readCount = 0;
+        for (int i = 0; i < splits.size(); i++) {
+            TaskAttemptContext tac = HCatHadoopShims.Instance.get().createTaskAttemptContext(jonconf, new TaskAttemptID());
+            RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
+            rr.initialize(splits.get(i), tac);
+            while (rr.nextKeyValue()) {
+                readCount++;
+            }
+        }
+        assertEquals("readCount should be equal to writeCount", readCount, writeCount);
     }
 
-  }
+}
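
The split-and-read logic under test here can likewise be summarized in a small sketch. ReadDemo, countRows, and the input directory are hypothetical names; the getSplits / createRecordReader / nextKeyValue sequence and the shim-based TaskAttemptContext creation follow writeThenReadByRecordReader above, with import locations assumed from the trunk layout.

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.RecordReader;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hcatalog.rcfile.RCFileMapReduceInputFormat;
    import org.apache.hcatalog.shims.HCatHadoopShims;

    // Illustrative read loop (hypothetical): count the rows under an RCFile
    // directory using the new-API input format, mirroring the test above.
    public class ReadDemo {

        public static long countRows(Configuration conf, String inputDir) throws Exception {
            RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable> inputFormat =
                new RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable>();
            Configuration jobConf = new Configuration(conf);
            jobConf.set("mapred.input.dir", inputDir);
            JobContext context = new Job(jobConf);                 // deprecated ctor, as in the test
            List<InputSplit> splits = inputFormat.getSplits(context);

            long rows = 0;
            for (InputSplit split : splits) {
                TaskAttemptContext tac = HCatHadoopShims.Instance.get()
                    .createTaskAttemptContext(jobConf, new TaskAttemptID());
                RecordReader<LongWritable, BytesRefArrayWritable> rr =
                    inputFormat.createRecordReader(split, tac);
                rr.initialize(split, tac);
                while (rr.nextKeyValue()) {
                    rows++;
                }
                rr.close();
            }
            return rows;
        }
    }
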
 
 
diff --git a/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java b/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java
index da808d4..68920fd 100644
--- a/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java
+++ b/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java
@@ -52,531 +52,531 @@
 import org.junit.Test;
 
 public class TestHdfsAuthorizationProvider {
-  
-  protected HCatDriver hcatDriver;
-  protected HiveMetaStoreClient msc;
-  protected HiveConf conf;
-  protected String whDir;
-  protected Path whPath;
-  protected FileSystem whFs;
-  protected Warehouse wh;
-  protected Hive hive;
 
-  @Before
-  public void setUp() throws Exception {
-    
-    conf = new HiveConf(this.getClass());
-    conf.set(ConfVars.PREEXECHOOKS.varname, "");
-    conf.set(ConfVars.POSTEXECHOOKS.varname, "");
-    conf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-  
-    conf.set("hive.metastore.local", "true");
-    conf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
-    conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, 
-        StorageDelegationAuthorizationProvider.class.getCanonicalName());
-    conf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");
-    
-    whDir = System.getProperty("test.warehouse.dir", "/tmp/testhdfsauthorization_wh");
-    conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, whDir);
-    
-    UserGroupInformation ugi = ShimLoader.getHadoopShims().getUGIForConf(conf);
-    String username = ShimLoader.getHadoopShims().getShortUserName(ugi); 
-    
-    whPath = new Path(whDir);
-    whFs = whPath.getFileSystem(conf);
-    
-    wh = new Warehouse(conf);
-    hive = Hive.get(conf);
-    
-    //clean up mess in HMS 
-    HcatTestUtils.cleanupHMS(hive, wh, perm700);
-    
-    whFs.delete(whPath, true);
-    whFs.mkdirs(whPath, perm755);
-    
-    SessionState.start(new CliSessionState(conf));
-    hcatDriver = new HCatDriver();
-  }
+    protected HCatDriver hcatDriver;
+    protected HiveMetaStoreClient msc;
+    protected HiveConf conf;
+    protected String whDir;
+    protected Path whPath;
+    protected FileSystem whFs;
+    protected Warehouse wh;
+    protected Hive hive;
 
-  @After
-  public void tearDown() throws IOException {
-    whFs.close();
-    hcatDriver.close();
-    Hive.closeCurrent();
-  }
+    @Before
+    public void setUp() throws Exception {
 
-  public Path getDbPath(String dbName) throws MetaException, HiveException {
-    return HcatTestUtils.getDbPath(hive, wh, dbName); 
-  }
-  
-  public Path getTablePath(String dbName, String tableName) throws HiveException {
-    Table table = hive.getTable(dbName, tableName);
-    return table.getPath();
-  }
+        conf = new HiveConf(this.getClass());
+        conf.set(ConfVars.PREEXECHOOKS.varname, "");
+        conf.set(ConfVars.POSTEXECHOOKS.varname, "");
+        conf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
 
-  public Path getPartPath(String partName, String dbName, String tableName) throws HiveException {
-    return new Path(getTablePath(dbName, tableName), partName);
-  }
+        conf.set("hive.metastore.local", "true");
+        conf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
+        conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
+        conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
+                StorageDelegationAuthorizationProvider.class.getCanonicalName());
+        conf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");
 
-  /** Execute the query expecting success*/
-  public void exec(String format, Object ... args) throws Exception {
-    String command = String.format(format, args);
-    CommandProcessorResponse resp = hcatDriver.run(command);
-    Assert.assertEquals(resp.getErrorMessage(), 0, resp.getResponseCode());
-    Assert.assertEquals(resp.getErrorMessage(), null, resp.getErrorMessage());
-  }
+        whDir = System.getProperty("test.warehouse.dir", "/tmp/testhdfsauthorization_wh");
+        conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, whDir);
 
-  /** Execute the query expecting it to fail with AuthorizationException */
-  public void execFail(String format, Object ... args) throws Exception {
-    String command = String.format(format, args);
-    CommandProcessorResponse resp = hcatDriver.run(command);
-    Assert.assertNotSame(resp.getErrorMessage(), 0, resp.getResponseCode());
-    Assert.assertTrue((resp.getResponseCode() == 40000) || (resp.getResponseCode() == 403));
-    if(resp.getErrorMessage() != null){
-     Assert.assertTrue(resp.getErrorMessage().contains("org.apache.hadoop.security.AccessControlException"));
+        UserGroupInformation ugi = ShimLoader.getHadoopShims().getUGIForConf(conf);
+        String username = ShimLoader.getHadoopShims().getShortUserName(ugi);
+
+        whPath = new Path(whDir);
+        whFs = whPath.getFileSystem(conf);
+
+        wh = new Warehouse(conf);
+        hive = Hive.get(conf);
+
+        //clean up mess in HMS
+        HcatTestUtils.cleanupHMS(hive, wh, perm700);
+
+        whFs.delete(whPath, true);
+        whFs.mkdirs(whPath, perm755);
+
+        SessionState.start(new CliSessionState(conf));
+        hcatDriver = new HCatDriver();
     }
-  }
 
-  
-  /** 
-   * Tests whether the warehouse directory is writable by the current user (as defined by Hadoop)
-   */
-  @Test
-  public void testWarehouseIsWritable() throws Exception {
-    Path top = new Path(whPath, "_foobarbaz12_");
-    try {
-      whFs.mkdirs(top);
-    } finally {
-      whFs.delete(top, true);
+    @After
+    public void tearDown() throws IOException {
+        whFs.close();
+        hcatDriver.close();
+        Hive.closeCurrent();
     }
-  }
-  
-  @Test
-  public void testShowDatabases() throws Exception {
-    exec("CREATE DATABASE doo");
-    exec("SHOW DATABASES");
-    
-    whFs.setPermission(whPath, perm300); //revoke r
-    execFail("SHOW DATABASES");
-  }
-  
-  @Test
-  public void testDatabaseOps() throws Exception {
-    exec("SHOW TABLES");
-    exec("SHOW TABLE EXTENDED LIKE foo1");
-    
-    whFs.setPermission(whPath, perm700);
-    exec("CREATE DATABASE doo");
-    exec("DESCRIBE DATABASE doo");
-    exec("USE doo");
-    exec("SHOW TABLES");
-    exec("SHOW TABLE EXTENDED LIKE foo1");
-    exec("DROP DATABASE doo");
-    
-    //custom location
-    Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
-    whFs.mkdirs(dbPath, perm700);
-    exec("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
-    exec("DESCRIBE DATABASE doo2", dbPath.toUri());
-    exec("USE doo2");
-    exec("SHOW TABLES");
-    exec("SHOW TABLE EXTENDED LIKE foo1");
-    exec("DROP DATABASE doo2", dbPath.toUri());
-    
-    //custom non-existing location
-    exec("CREATE DATABASE doo3 LOCATION '%s/subpath'", dbPath.toUri());
-  }
-  
-  @Test
-  public void testCreateDatabaseFail1() throws Exception {
-    whFs.setPermission(whPath, perm500);
-    execFail("CREATE DATABASE doo"); //in the default location
-    
-    whFs.setPermission(whPath, perm555);
-    execFail("CREATE DATABASE doo2");
-  }
 
-  @Test
-  public void testCreateDatabaseFail2() throws Exception {
-    //custom location
-    Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
-    
-    whFs.mkdirs(dbPath, perm700);
-    whFs.setPermission(dbPath, perm500);
-    execFail("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
-  }
-  
-  @Test
-  public void testDropDatabaseFail1() throws Exception {
-    whFs.setPermission(whPath, perm700);
-    exec("CREATE DATABASE doo"); //in the default location
-    
-    whFs.setPermission(getDbPath("doo"), perm500); //revoke write
-    execFail("DROP DATABASE doo");
-  }
-  
-  @Test
-  public void testDropDatabaseFail2() throws Exception {
-    //custom location
-    Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
-    
-    whFs.mkdirs(dbPath, perm700);
-    exec("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
-    
-    whFs.setPermission(dbPath, perm500);
-    execFail("DROP DATABASE doo2");
-  }
-  
-  @Test
-  public void testDescSwitchDatabaseFail() throws Exception {
-    whFs.setPermission(whPath, perm700);
-    exec("CREATE DATABASE doo");
-    whFs.setPermission(getDbPath("doo"), perm300); //revoke read
-    execFail("DESCRIBE DATABASE doo");
-    execFail("USE doo");
-    
-    //custom location
-    Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
-    whFs.mkdirs(dbPath, perm700);
-    exec("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
-    whFs.mkdirs(dbPath, perm300); //revoke read
-    execFail("DESCRIBE DATABASE doo2", dbPath.toUri());
-    execFail("USE doo2");
-  }
-  
-  @Test 
-  public void testShowTablesFail() throws Exception {
-    whFs.setPermission(whPath, perm700);
-    exec("CREATE DATABASE doo");
-    exec("USE doo");
-    whFs.setPermission(getDbPath("doo"), perm300); //revoke read
-    execFail("SHOW TABLES");
-    execFail("SHOW TABLE EXTENDED LIKE foo1");
-  }
-  
-  @Test
-  public void testTableOps() throws Exception {
-    //default db
-    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
-    exec("DESCRIBE foo1");
-    exec("DROP TABLE foo1");
-    
-    //default db custom location
-    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    whFs.mkdirs(tablePath, perm700);
-    exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
-    exec("DESCRIBE foo2");
-    exec("DROP TABLE foo2");
-    
-    //default db custom non existing location
-    exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath);
-    exec("DESCRIBE foo3");
-    exec("DROP TABLE foo3");
-    
-    //non default db
-    exec("CREATE DATABASE doo");
-    exec("USE doo");
-    
-    exec("CREATE TABLE foo4 (foo INT) STORED AS RCFILE");
-    exec("DESCRIBE foo4");
-    exec("DROP TABLE foo4");
-    
-    //non-default db custom location
-    tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    whFs.mkdirs(tablePath, perm700);
-    exec("CREATE EXTERNAL TABLE foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
-    exec("DESCRIBE foo5");
-    exec("DROP TABLE foo5");
-    
-    //non-default db custom non existing location
-    exec("CREATE EXTERNAL TABLE foo6 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath);
-    exec("DESCRIBE foo6");
-    exec("DROP TABLE foo6");
-    
-    exec("DROP TABLE IF EXISTS foo_non_exists");
-    
-    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
-    exec("DESCRIBE EXTENDED foo1");
-    exec("DESCRIBE FORMATTED foo1");
-    exec("DESCRIBE foo1.foo");
-    
-    //deep non-existing path for the table
-    tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    whFs.mkdirs(tablePath, perm700);
-    exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s/a/a/a/'", tablePath);
-  }
-  
-  @Test
-  public void testCreateTableFail1() throws Exception {
-    //default db
-    whFs.mkdirs(whPath, perm500); //revoke w
-    execFail("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
-  }
-  
-  @Test
-  public void testCreateTableFail2() throws Exception {
-    //default db custom location
-    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    whFs.mkdirs(tablePath, perm500);
-    execFail("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
-    
-    //default db custom non existing location
-    execFail("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath);
-  }
-  
-  @Test
-  public void testCreateTableFail3() throws Exception {
-    //non default db
-    exec("CREATE DATABASE doo");
-    whFs.setPermission(getDbPath("doo"), perm500);
+    public Path getDbPath(String dbName) throws MetaException, HiveException {
+        return HcatTestUtils.getDbPath(hive, wh, dbName);
+    }
 
-    execFail("CREATE TABLE doo.foo4 (foo INT) STORED AS RCFILE");
-    
-    //non-default db custom location, permission to write to tablePath, but not on db path
-    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    whFs.mkdirs(tablePath, perm700);
-    exec("USE doo");
-    execFail("CREATE EXTERNAL TABLE foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
-  }
+    public Path getTablePath(String dbName, String tableName) throws HiveException {
+        Table table = hive.getTable(dbName, tableName);
+        return table.getPath();
+    }
 
-  @Test
-  public void testCreateTableFail4() throws Exception {
-    //non default db
-    exec("CREATE DATABASE doo");
+    public Path getPartPath(String partName, String dbName, String tableName) throws HiveException {
+        return new Path(getTablePath(dbName, tableName), partName);
+    }
 
-    //non-default db custom location
-    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    whFs.mkdirs(tablePath, perm500);
-    execFail("CREATE EXTERNAL TABLE doo.foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
-    
-    //non-default db custom non existing location
-    execFail("CREATE EXTERNAL TABLE doo.foo6 (foo INT) STORED AS RCFILE LOCATION '%s/a/a/a/'", tablePath);
-  }
-  
-  @Test
-  public void testDropTableFail1() throws Exception {
-    //default db
-    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
-    whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke w
-    execFail("DROP TABLE foo1");
-  }
-  
-  @Test
-  public void testDropTableFail2() throws Exception {
-    //default db custom location
-    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
-    whFs.mkdirs(tablePath, perm500);
-    execFail("DROP TABLE foo2");
-  }
+    /** Execute the query expecting success*/
+    public void exec(String format, Object... args) throws Exception {
+        String command = String.format(format, args);
+        CommandProcessorResponse resp = hcatDriver.run(command);
+        Assert.assertEquals(resp.getErrorMessage(), 0, resp.getResponseCode());
+        Assert.assertEquals(resp.getErrorMessage(), null, resp.getErrorMessage());
+    }
 
-  @Test
-  public void testDropTableFail4() throws Exception {
-    //non default db
-    exec("CREATE DATABASE doo");
+    /** Execute the query expecting it to fail with AuthorizationException */
+    public void execFail(String format, Object... args) throws Exception {
+        String command = String.format(format, args);
+        CommandProcessorResponse resp = hcatDriver.run(command);
+        Assert.assertNotSame(resp.getErrorMessage(), 0, resp.getResponseCode());
+        Assert.assertTrue((resp.getResponseCode() == 40000) || (resp.getResponseCode() == 403));
+        if (resp.getErrorMessage() != null) {
+            Assert.assertTrue(resp.getErrorMessage().contains("org.apache.hadoop.security.AccessControlException"));
+        }
+    }
 
-    //non-default db custom location
-    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    
-    exec("CREATE EXTERNAL TABLE doo.foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
-    whFs.mkdirs(tablePath, perm500);
-    exec("USE doo"); //There is no DROP TABLE doo.foo5 support in Hive
-    execFail("DROP TABLE foo5");
-  }
-  
-  @Test
-  public void testDescTableFail() throws Exception {
-    //default db
-    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
-    whFs.mkdirs(getTablePath("default", "foo1"), perm300); //revoke read
-    execFail("DESCRIBE foo1");
-    
-    //default db custom location
-    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    whFs.mkdirs(tablePath, perm700);
-    exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
-    whFs.mkdirs(tablePath, perm300); //revoke read
-    execFail("DESCRIBE foo2");
-  }
-  
-  @Test
-  public void testAlterTableRename() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
-    exec("ALTER TABLE foo1 RENAME TO foo2");
-    
-    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
-    exec("ALTER TABLE foo3 RENAME TO foo4");
-  }
-  
-  @Test
-  public void testAlterTableRenameFail() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
-    whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke write
-    execFail("ALTER TABLE foo1 RENAME TO foo2");
-    
-    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
-    whFs.mkdirs(tablePath, perm500); //revoke write 
-    execFail("ALTER TABLE foo3 RENAME TO foo4");
-  }
-  
-  @Test
-  public void testAlterTableRelocate() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
-    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    exec("ALTER TABLE foo1 SET LOCATION '%s'", tablePath.makeQualified(whFs));
-    
-    tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
-    exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", 
-        tablePath.makeQualified(whFs));
-    tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
-    exec("ALTER TABLE foo3 SET LOCATION '%s'", tablePath.makeQualified(whFs));
-  }
-  
-  @Test
-  public void testAlterTableRelocateFail() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
-    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
-    whFs.mkdirs(tablePath, perm500); //revoke write
-    execFail("ALTER TABLE foo1 SET LOCATION '%s'", tablePath.makeQualified(whFs));
-    
-    //dont have access to new table loc
-    tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
-    exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", 
-        tablePath.makeQualified(whFs));
-    tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
-    whFs.mkdirs(tablePath, perm500); //revoke write
-    execFail("ALTER TABLE foo3 SET LOCATION '%s'", tablePath.makeQualified(whFs));
-    
-    //have access to new table loc, but not old table loc 
-    tablePath = new Path(whPath, new Random().nextInt() + "/mytable3");
-    exec("CREATE EXTERNAL TABLE foo4 (foo INT) STORED AS RCFILE LOCATION '%s'", 
-        tablePath.makeQualified(whFs));
-    whFs.mkdirs(tablePath, perm500); //revoke write
-    tablePath = new Path(whPath, new Random().nextInt() + "/mytable3");
-    execFail("ALTER TABLE foo4 SET LOCATION '%s'", tablePath.makeQualified(whFs));
-  }
-  
-  @Test
-  public void testAlterTable() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
-    exec("ALTER TABLE foo1 SET TBLPROPERTIES ('foo'='bar')");
-    exec("ALTER TABLE foo1 SET SERDEPROPERTIES ('foo'='bar')");
-    exec("ALTER TABLE foo1 ADD COLUMNS (foo2 INT)");
-  }
-  
-  @Test
-  public void testAddDropPartition() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
-    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')");
-    exec("ALTER TABLE foo1 ADD IF NOT EXISTS PARTITION (b='2010-10-10')");
-    String relPath = new Random().nextInt() + "/mypart";
-    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-11') LOCATION '%s'", relPath);
-    
-    exec("ALTER TABLE foo1 PARTITION (b='2010-10-10') SET FILEFORMAT RCFILE");
-    
-    exec("ALTER TABLE foo1 PARTITION (b='2010-10-10') SET FILEFORMAT INPUTFORMAT "
-        + "'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT "
-        + "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver "
-        + "'mydriver' outputdriver 'yourdriver'");    
-    
-    exec("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')");
-    exec("ALTER TABLE foo1 DROP PARTITION (b='2010-10-11')");
-  }
-  
-  @Test
-  public void testAddPartitionFail1() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
-    whFs.mkdirs(getTablePath("default", "foo1"), perm500);
-    execFail("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')");
-  }
-  
-  @Test
-  public void testAddPartitionFail2() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
-    String relPath = new Random().nextInt() + "/mypart";
-    Path partPath = new Path(getTablePath("default", "foo1"), relPath);
-    whFs.mkdirs(partPath, perm500);
-    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10') LOCATION '%s'", partPath);
-  }
-  
-  @Test
-  public void testDropPartitionFail1() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
-    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')");
-    whFs.mkdirs(getPartPath("b=2010-10-10", "default", "foo1"), perm500);
-    execFail("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')");
-  }
 
-  @Test
-  public void testDropPartitionFail2() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
-    String relPath = new Random().nextInt() + "/mypart";
-    Path partPath = new Path(getTablePath("default", "foo1"), relPath);
-    whFs.mkdirs(partPath, perm700);
-    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10') LOCATION '%s'", partPath);
-    whFs.mkdirs(partPath, perm500); //revoke write
-    execFail("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')");
-  }
-  
-  @Test
-  public void testAlterTableFail() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (boo STRING) STORED AS TEXTFILE");
-    whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke write
-    execFail("ALTER TABLE foo1 SET TBLPROPERTIES ('foo'='bar')");
-    execFail("ALTER TABLE foo1 SET SERDEPROPERTIES ('foo'='bar')");
-    execFail("ALTER TABLE foo1 ADD COLUMNS (foo2 INT)");
-  }
-  
-  @Test
-  public void testShowTables() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (boo STRING) STORED AS TEXTFILE");
-    exec("SHOW PARTITIONS foo1");
-    
-    whFs.mkdirs(getTablePath("default", "foo1"), perm300); //revoke read
-    execFail("SHOW PARTITIONS foo1");
-  }
-  
-  @Test
-  public void testAlterTablePartRename() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
-    Path loc = new Path(whPath, new Random().nextInt() + "/mypart");
-    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16') LOCATION '%s'", loc);
-    exec("ALTER TABLE foo1 PARTITION (b='2010-10-16') RENAME TO PARTITION (b='2010-10-17')");
-  }
-  
-  @Test
-  public void testAlterTablePartRenameFail() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
-    Path loc = new Path(whPath, new Random().nextInt() + "/mypart");
-    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16') LOCATION '%s'", loc);
-    whFs.setPermission(loc, perm500); //revoke w
-    execFail("ALTER TABLE foo1 PARTITION (b='2010-10-16') RENAME TO PARTITION (b='2010-10-17')");
-  }
-  
-  @Test
-  public void testAlterTablePartRelocate() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
-    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16')");
-    Path partPath = new Path(whPath, new Random().nextInt() + "/mypart");
-    exec("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", partPath.makeQualified(whFs));
-  }
+    /**
+     * Tests whether the warehouse directory is writable by the current user (as defined by Hadoop)
+     */
+    @Test
+    public void testWarehouseIsWritable() throws Exception {
+        Path top = new Path(whPath, "_foobarbaz12_");
+        try {
+            whFs.mkdirs(top);
+        } finally {
+            whFs.delete(top, true);
+        }
+    }
 
-  @Test
-  public void testAlterTablePartRelocateFail() throws Exception {
-    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
-    
-    Path oldLoc = new Path(whPath, new Random().nextInt() + "/mypart");
-    Path newLoc = new Path(whPath, new Random().nextInt() + "/mypart2");
-    
-    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16') LOCATION '%s'", oldLoc);
-    whFs.mkdirs(oldLoc, perm500);
-    execFail("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", newLoc.makeQualified(whFs));
-    whFs.mkdirs(oldLoc, perm700);
-    whFs.mkdirs(newLoc, perm500);
-    execFail("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", newLoc.makeQualified(whFs));
-  }
-  
+    @Test
+    public void testShowDatabases() throws Exception {
+        exec("CREATE DATABASE doo");
+        exec("SHOW DATABASES");
+
+        whFs.setPermission(whPath, perm300); //revoke r
+        execFail("SHOW DATABASES");
+    }
+
+    @Test
+    public void testDatabaseOps() throws Exception {
+        exec("SHOW TABLES");
+        exec("SHOW TABLE EXTENDED LIKE foo1");
+
+        whFs.setPermission(whPath, perm700);
+        exec("CREATE DATABASE doo");
+        exec("DESCRIBE DATABASE doo");
+        exec("USE doo");
+        exec("SHOW TABLES");
+        exec("SHOW TABLE EXTENDED LIKE foo1");
+        exec("DROP DATABASE doo");
+
+        //custom location
+        Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
+        whFs.mkdirs(dbPath, perm700);
+        exec("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
+        exec("DESCRIBE DATABASE doo2", dbPath.toUri());
+        exec("USE doo2");
+        exec("SHOW TABLES");
+        exec("SHOW TABLE EXTENDED LIKE foo1");
+        exec("DROP DATABASE doo2", dbPath.toUri());
+
+        //custom non-existing location
+        exec("CREATE DATABASE doo3 LOCATION '%s/subpath'", dbPath.toUri());
+    }
+
+    @Test
+    public void testCreateDatabaseFail1() throws Exception {
+        whFs.setPermission(whPath, perm500);
+        execFail("CREATE DATABASE doo"); //in the default location
+
+        whFs.setPermission(whPath, perm555);
+        execFail("CREATE DATABASE doo2");
+    }
+
+    @Test
+    public void testCreateDatabaseFail2() throws Exception {
+        //custom location
+        Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
+
+        whFs.mkdirs(dbPath, perm700);
+        whFs.setPermission(dbPath, perm500);
+        execFail("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
+    }
+
+    @Test
+    public void testDropDatabaseFail1() throws Exception {
+        whFs.setPermission(whPath, perm700);
+        exec("CREATE DATABASE doo"); //in the default location
+
+        whFs.setPermission(getDbPath("doo"), perm500); //revoke write
+        execFail("DROP DATABASE doo");
+    }
+
+    @Test
+    public void testDropDatabaseFail2() throws Exception {
+        //custom location
+        Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
+
+        whFs.mkdirs(dbPath, perm700);
+        exec("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
+
+        whFs.setPermission(dbPath, perm500);
+        execFail("DROP DATABASE doo2");
+    }
+
+    @Test
+    public void testDescSwitchDatabaseFail() throws Exception {
+        whFs.setPermission(whPath, perm700);
+        exec("CREATE DATABASE doo");
+        whFs.setPermission(getDbPath("doo"), perm300); //revoke read
+        execFail("DESCRIBE DATABASE doo");
+        execFail("USE doo");
+
+        //custom location
+        Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
+        whFs.mkdirs(dbPath, perm700);
+        exec("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
+        whFs.mkdirs(dbPath, perm300); //revoke read
+        execFail("DESCRIBE DATABASE doo2", dbPath.toUri());
+        execFail("USE doo2");
+    }
+
+    @Test
+    public void testShowTablesFail() throws Exception {
+        whFs.setPermission(whPath, perm700);
+        exec("CREATE DATABASE doo");
+        exec("USE doo");
+        whFs.setPermission(getDbPath("doo"), perm300); //revoke read
+        execFail("SHOW TABLES");
+        execFail("SHOW TABLE EXTENDED LIKE foo1");
+    }
+
+    @Test
+    public void testTableOps() throws Exception {
+        //default db
+        exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+        exec("DESCRIBE foo1");
+        exec("DROP TABLE foo1");
+
+        //default db custom location
+        Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        whFs.mkdirs(tablePath, perm700);
+        exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+        exec("DESCRIBE foo2");
+        exec("DROP TABLE foo2");
+
+        //default db custom non existing location
+        exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath);
+        exec("DESCRIBE foo3");
+        exec("DROP TABLE foo3");
+
+        //non default db
+        exec("CREATE DATABASE doo");
+        exec("USE doo");
+
+        exec("CREATE TABLE foo4 (foo INT) STORED AS RCFILE");
+        exec("DESCRIBE foo4");
+        exec("DROP TABLE foo4");
+
+        //non-default db custom location
+        tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        whFs.mkdirs(tablePath, perm700);
+        exec("CREATE EXTERNAL TABLE foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+        exec("DESCRIBE foo5");
+        exec("DROP TABLE foo5");
+
+        //non-default db custom non existing location
+        exec("CREATE EXTERNAL TABLE foo6 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath);
+        exec("DESCRIBE foo6");
+        exec("DROP TABLE foo6");
+
+        exec("DROP TABLE IF EXISTS foo_non_exists");
+
+        exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+        exec("DESCRIBE EXTENDED foo1");
+        exec("DESCRIBE FORMATTED foo1");
+        exec("DESCRIBE foo1.foo");
+
+        //deep non-existing path for the table
+        tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        whFs.mkdirs(tablePath, perm700);
+        exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s/a/a/a/'", tablePath);
+    }
+
+    @Test
+    public void testCreateTableFail1() throws Exception {
+        //default db
+        whFs.mkdirs(whPath, perm500); //revoke w
+        execFail("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+    }
+
+    @Test
+    public void testCreateTableFail2() throws Exception {
+        //default db custom location
+        Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        whFs.mkdirs(tablePath, perm500);
+        execFail("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+
+        //default db custom non existing location
+        execFail("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath);
+    }
+
+    @Test
+    public void testCreateTableFail3() throws Exception {
+        //non default db
+        exec("CREATE DATABASE doo");
+        whFs.setPermission(getDbPath("doo"), perm500);
+
+        execFail("CREATE TABLE doo.foo4 (foo INT) STORED AS RCFILE");
+
+        //non-default db custom location, permission to write to tablePath, but not on db path
+        Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        whFs.mkdirs(tablePath, perm700);
+        exec("USE doo");
+        execFail("CREATE EXTERNAL TABLE foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+    }
+
+    @Test
+    public void testCreateTableFail4() throws Exception {
+        //non default db
+        exec("CREATE DATABASE doo");
+
+        //non-default db custom location
+        Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        whFs.mkdirs(tablePath, perm500);
+        execFail("CREATE EXTERNAL TABLE doo.foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+
+        //non-default db custom non existing location
+        execFail("CREATE EXTERNAL TABLE doo.foo6 (foo INT) STORED AS RCFILE LOCATION '%s/a/a/a/'", tablePath);
+    }
+
+    @Test
+    public void testDropTableFail1() throws Exception {
+        //default db
+        exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+        whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke w
+        execFail("DROP TABLE foo1");
+    }
+
+    @Test
+    public void testDropTableFail2() throws Exception {
+        //default db custom location
+        Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+        whFs.mkdirs(tablePath, perm500);
+        execFail("DROP TABLE foo2");
+    }
+
+    @Test
+    public void testDropTableFail4() throws Exception {
+        //non default db
+        exec("CREATE DATABASE doo");
+
+        //non-default db custom location
+        Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+
+        exec("CREATE EXTERNAL TABLE doo.foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+        whFs.mkdirs(tablePath, perm500);
+        exec("USE doo"); //There is no DROP TABLE doo.foo5 support in Hive
+        execFail("DROP TABLE foo5");
+    }
+
+    @Test
+    public void testDescTableFail() throws Exception {
+        //default db
+        exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+        whFs.mkdirs(getTablePath("default", "foo1"), perm300); //revoke read
+        execFail("DESCRIBE foo1");
+
+        //default db custom location
+        Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        whFs.mkdirs(tablePath, perm700);
+        exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+        whFs.mkdirs(tablePath, perm300); //revoke read
+        execFail("DESCRIBE foo2");
+    }
+
+    @Test
+    public void testAlterTableRename() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+        exec("ALTER TABLE foo1 RENAME TO foo2");
+
+        Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+        exec("ALTER TABLE foo3 RENAME TO foo4");
+    }
+
+    @Test
+    public void testAlterTableRenameFail() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+        whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke write
+        execFail("ALTER TABLE foo1 RENAME TO foo2");
+
+        Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+        whFs.mkdirs(tablePath, perm500); //revoke write
+        execFail("ALTER TABLE foo3 RENAME TO foo4");
+    }
+
+    @Test
+    public void testAlterTableRelocate() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+        Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        exec("ALTER TABLE foo1 SET LOCATION '%s'", tablePath.makeQualified(whFs));
+
+        tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
+        exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'",
+                tablePath.makeQualified(whFs));
+        tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
+        exec("ALTER TABLE foo3 SET LOCATION '%s'", tablePath.makeQualified(whFs));
+    }
+
+    @Test
+    public void testAlterTableRelocateFail() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+        Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+        whFs.mkdirs(tablePath, perm500); //revoke write
+        execFail("ALTER TABLE foo1 SET LOCATION '%s'", tablePath.makeQualified(whFs));
+
+        //dont have access to new table loc
+        tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
+        exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'",
+                tablePath.makeQualified(whFs));
+        tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
+        whFs.mkdirs(tablePath, perm500); //revoke write
+        execFail("ALTER TABLE foo3 SET LOCATION '%s'", tablePath.makeQualified(whFs));
+
+        //have access to new table loc, but not old table loc
+        tablePath = new Path(whPath, new Random().nextInt() + "/mytable3");
+        exec("CREATE EXTERNAL TABLE foo4 (foo INT) STORED AS RCFILE LOCATION '%s'",
+                tablePath.makeQualified(whFs));
+        whFs.mkdirs(tablePath, perm500); //revoke write
+        tablePath = new Path(whPath, new Random().nextInt() + "/mytable3");
+        execFail("ALTER TABLE foo4 SET LOCATION '%s'", tablePath.makeQualified(whFs));
+    }
+
+    @Test
+    public void testAlterTable() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+        exec("ALTER TABLE foo1 SET TBLPROPERTIES ('foo'='bar')");
+        exec("ALTER TABLE foo1 SET SERDEPROPERTIES ('foo'='bar')");
+        exec("ALTER TABLE foo1 ADD COLUMNS (foo2 INT)");
+    }
+
+    @Test
+    public void testAddDropPartition() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+        exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')");
+        exec("ALTER TABLE foo1 ADD IF NOT EXISTS PARTITION (b='2010-10-10')");
+        String relPath = new Random().nextInt() + "/mypart";
+        exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-11') LOCATION '%s'", relPath);
+
+        exec("ALTER TABLE foo1 PARTITION (b='2010-10-10') SET FILEFORMAT RCFILE");
+
+        exec("ALTER TABLE foo1 PARTITION (b='2010-10-10') SET FILEFORMAT INPUTFORMAT "
+                + "'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT "
+                + "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver "
+                + "'mydriver' outputdriver 'yourdriver'");
+
+        exec("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')");
+        exec("ALTER TABLE foo1 DROP PARTITION (b='2010-10-11')");
+    }
+
+    @Test
+    public void testAddPartitionFail1() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+        whFs.mkdirs(getTablePath("default", "foo1"), perm500);
+        execFail("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')");
+    }
+
+    @Test
+    public void testAddPartitionFail2() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+        String relPath = new Random().nextInt() + "/mypart";
+        Path partPath = new Path(getTablePath("default", "foo1"), relPath);
+        whFs.mkdirs(partPath, perm500);
+        exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10') LOCATION '%s'", partPath);
+    }
+
+    @Test
+    public void testDropPartitionFail1() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+        exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')");
+        whFs.mkdirs(getPartPath("b=2010-10-10", "default", "foo1"), perm500);
+        execFail("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')");
+    }
+
+    @Test
+    public void testDropPartitionFail2() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+        String relPath = new Random().nextInt() + "/mypart";
+        Path partPath = new Path(getTablePath("default", "foo1"), relPath);
+        whFs.mkdirs(partPath, perm700);
+        exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10') LOCATION '%s'", partPath);
+        whFs.mkdirs(partPath, perm500); //revoke write
+        execFail("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')");
+    }
+
+    @Test
+    public void testAlterTableFail() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (boo STRING) STORED AS TEXTFILE");
+        whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke write
+        execFail("ALTER TABLE foo1 SET TBLPROPERTIES ('foo'='bar')");
+        execFail("ALTER TABLE foo1 SET SERDEPROPERTIES ('foo'='bar')");
+        execFail("ALTER TABLE foo1 ADD COLUMNS (foo2 INT)");
+    }
+
+    @Test
+    public void testShowTables() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (boo STRING) STORED AS TEXTFILE");
+        exec("SHOW PARTITIONS foo1");
+
+        whFs.mkdirs(getTablePath("default", "foo1"), perm300); //revoke read
+        execFail("SHOW PARTITIONS foo1");
+    }
+
+    @Test
+    public void testAlterTablePartRename() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
+        Path loc = new Path(whPath, new Random().nextInt() + "/mypart");
+        exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16') LOCATION '%s'", loc);
+        exec("ALTER TABLE foo1 PARTITION (b='2010-10-16') RENAME TO PARTITION (b='2010-10-17')");
+    }
+
+    @Test
+    public void testAlterTablePartRenameFail() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
+        Path loc = new Path(whPath, new Random().nextInt() + "/mypart");
+        exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16') LOCATION '%s'", loc);
+        whFs.setPermission(loc, perm500); //revoke write
+        execFail("ALTER TABLE foo1 PARTITION (b='2010-10-16') RENAME TO PARTITION (b='2010-10-17')");
+    }
+
+    @Test
+    public void testAlterTablePartRelocate() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
+        exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16')");
+        Path partPath = new Path(whPath, new Random().nextInt() + "/mypart");
+        exec("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", partPath.makeQualified(whFs));
+    }
+
+    @Test
+    public void testAlterTablePartRelocateFail() throws Exception {
+        exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
+
+        Path oldLoc = new Path(whPath, new Random().nextInt() + "/mypart");
+        Path newLoc = new Path(whPath, new Random().nextInt() + "/mypart2");
+
+        exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16') LOCATION '%s'", oldLoc);
+        whFs.mkdirs(oldLoc, perm500);
+        execFail("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", newLoc.makeQualified(whFs));
+        whFs.mkdirs(oldLoc, perm700);
+        whFs.mkdirs(newLoc, perm500);
+        execFail("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", newLoc.makeQualified(whFs));
+    }
+
 }
diff --git a/storage-handlers/hbase/src/gen-java/org/apache/hcatalog/hbase/snapshot/transaction/thrift/StoreFamilyRevision.java b/storage-handlers/hbase/src/gen-java/org/apache/hcatalog/hbase/snapshot/transaction/thrift/StoreFamilyRevision.java
index 34096ac..a5d8213 100644
--- a/storage-handlers/hbase/src/gen-java/org/apache/hcatalog/hbase/snapshot/transaction/thrift/StoreFamilyRevision.java
+++ b/storage-handlers/hbase/src/gen-java/org/apache/hcatalog/hbase/snapshot/transaction/thrift/StoreFamilyRevision.java
@@ -35,383 +35,382 @@
 import java.util.BitSet;
 
 public class StoreFamilyRevision implements org.apache.thrift.TBase<StoreFamilyRevision, StoreFamilyRevision._Fields>, java.io.Serializable, Cloneable {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StoreFamilyRevision");
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StoreFamilyRevision");
 
-  private static final org.apache.thrift.protocol.TField REVISION_FIELD_DESC = new org.apache.thrift.protocol.TField("revision", org.apache.thrift.protocol.TType.I64, (short)1);
-  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)2);
+    private static final org.apache.thrift.protocol.TField REVISION_FIELD_DESC = new org.apache.thrift.protocol.TField("revision", org.apache.thrift.protocol.TType.I64, (short) 1);
+    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short) 2);
 
-  public long revision; // required
-  public long timestamp; // required
+    public long revision; // required
+    public long timestamp; // required
 
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    REVISION((short)1, "revision"),
-    TIMESTAMP((short)2, "timestamp");
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+        REVISION((short) 1, "revision"),
+        TIMESTAMP((short) 2, "timestamp");
 
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+        private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+        static {
+            for (_Fields field : EnumSet.allOf(_Fields.class)) {
+                byName.put(field.getFieldName(), field);
+            }
+        }
+
+        /**
+         * Find the _Fields constant that matches fieldId, or null if its not found.
+         */
+        public static _Fields findByThriftId(int fieldId) {
+            switch (fieldId) {
+            case 1: // REVISION
+                return REVISION;
+            case 2: // TIMESTAMP
+                return TIMESTAMP;
+            default:
+                return null;
+            }
+        }
+
+        /**
+         * Find the _Fields constant that matches fieldId, throwing an exception
+         * if it is not found.
+         */
+        public static _Fields findByThriftIdOrThrow(int fieldId) {
+            _Fields fields = findByThriftId(fieldId);
+            if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+            return fields;
+        }
+
+        /**
+         * Find the _Fields constant that matches name, or null if its not found.
+         */
+        public static _Fields findByName(String name) {
+            return byName.get(name);
+        }
+
+        private final short _thriftId;
+        private final String _fieldName;
+
+        _Fields(short thriftId, String fieldName) {
+            _thriftId = thriftId;
+            _fieldName = fieldName;
+        }
+
+        public short getThriftFieldId() {
+            return _thriftId;
+        }
+
+        public String getFieldName() {
+            return _fieldName;
+        }
+    }
+
+    // isset id assignments
+    private static final int __REVISION_ISSET_ID = 0;
+    private static final int __TIMESTAMP_ISSET_ID = 1;
+    private BitSet __isset_bit_vector = new BitSet(2);
+
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
 
     static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
+        Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+        tmpMap.put(_Fields.REVISION, new org.apache.thrift.meta_data.FieldMetaData("revision", org.apache.thrift.TFieldRequirementType.DEFAULT,
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+        tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT,
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+        metaDataMap = Collections.unmodifiableMap(tmpMap);
+        org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(StoreFamilyRevision.class, metaDataMap);
+    }
+
+    public StoreFamilyRevision() {
+    }
+
+    public StoreFamilyRevision(
+        long revision,
+        long timestamp) {
+        this();
+        this.revision = revision;
+        setRevisionIsSet(true);
+        this.timestamp = timestamp;
+        setTimestampIsSet(true);
     }
 
     /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
+     * Performs a deep copy on <i>other</i>.
      */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // REVISION
-          return REVISION;
-        case 2: // TIMESTAMP
-          return TIMESTAMP;
-        default:
-          return null;
-      }
+    public StoreFamilyRevision(StoreFamilyRevision other) {
+        __isset_bit_vector.clear();
+        __isset_bit_vector.or(other.__isset_bit_vector);
+        this.revision = other.revision;
+        this.timestamp = other.timestamp;
     }
 
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
+    public StoreFamilyRevision deepCopy() {
+        return new StoreFamilyRevision(this);
     }
 
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
+    @Override
+    public void clear() {
+        setRevisionIsSet(false);
+        this.revision = 0;
+        setTimestampIsSet(false);
+        this.timestamp = 0;
     }
 
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
+    public long getRevision() {
+        return this.revision;
     }
 
-    public short getThriftFieldId() {
-      return _thriftId;
+    public StoreFamilyRevision setRevision(long revision) {
+        this.revision = revision;
+        setRevisionIsSet(true);
+        return this;
     }
 
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __REVISION_ISSET_ID = 0;
-  private static final int __TIMESTAMP_ISSET_ID = 1;
-  private BitSet __isset_bit_vector = new BitSet(2);
-
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.REVISION, new org.apache.thrift.meta_data.FieldMetaData("revision", org.apache.thrift.TFieldRequirementType.DEFAULT,
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT,
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(StoreFamilyRevision.class, metaDataMap);
-  }
-
-  public StoreFamilyRevision() {
-  }
-
-  public StoreFamilyRevision(
-    long revision,
-    long timestamp)
-  {
-    this();
-    this.revision = revision;
-    setRevisionIsSet(true);
-    this.timestamp = timestamp;
-    setTimestampIsSet(true);
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public StoreFamilyRevision(StoreFamilyRevision other) {
-    __isset_bit_vector.clear();
-    __isset_bit_vector.or(other.__isset_bit_vector);
-    this.revision = other.revision;
-    this.timestamp = other.timestamp;
-  }
-
-  public StoreFamilyRevision deepCopy() {
-    return new StoreFamilyRevision(this);
-  }
-
-  @Override
-  public void clear() {
-    setRevisionIsSet(false);
-    this.revision = 0;
-    setTimestampIsSet(false);
-    this.timestamp = 0;
-  }
-
-  public long getRevision() {
-    return this.revision;
-  }
-
-  public StoreFamilyRevision setRevision(long revision) {
-    this.revision = revision;
-    setRevisionIsSet(true);
-    return this;
-  }
-
-  public void unsetRevision() {
-    __isset_bit_vector.clear(__REVISION_ISSET_ID);
-  }
-
-  /** Returns true if field revision is set (has been assigned a value) and false otherwise */
-  public boolean isSetRevision() {
-    return __isset_bit_vector.get(__REVISION_ISSET_ID);
-  }
-
-  public void setRevisionIsSet(boolean value) {
-    __isset_bit_vector.set(__REVISION_ISSET_ID, value);
-  }
-
-  public long getTimestamp() {
-    return this.timestamp;
-  }
-
-  public StoreFamilyRevision setTimestamp(long timestamp) {
-    this.timestamp = timestamp;
-    setTimestampIsSet(true);
-    return this;
-  }
-
-  public void unsetTimestamp() {
-    __isset_bit_vector.clear(__TIMESTAMP_ISSET_ID);
-  }
-
-  /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
-  public boolean isSetTimestamp() {
-    return __isset_bit_vector.get(__TIMESTAMP_ISSET_ID);
-  }
-
-  public void setTimestampIsSet(boolean value) {
-    __isset_bit_vector.set(__TIMESTAMP_ISSET_ID, value);
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case REVISION:
-      if (value == null) {
-        unsetRevision();
-      } else {
-        setRevision((Long)value);
-      }
-      break;
-
-    case TIMESTAMP:
-      if (value == null) {
-        unsetTimestamp();
-      } else {
-        setTimestamp((Long)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case REVISION:
-      return Long.valueOf(getRevision());
-
-    case TIMESTAMP:
-      return Long.valueOf(getTimestamp());
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
+    public void unsetRevision() {
+        __isset_bit_vector.clear(__REVISION_ISSET_ID);
     }
 
-    switch (field) {
-    case REVISION:
-      return isSetRevision();
-    case TIMESTAMP:
-      return isSetTimestamp();
+    /** Returns true if field revision is set (has been assigned a value) and false otherwise */
+    public boolean isSetRevision() {
+        return __isset_bit_vector.get(__REVISION_ISSET_ID);
     }
-    throw new IllegalStateException();
-  }
 
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof StoreFamilyRevision)
-      return this.equals((StoreFamilyRevision)that);
-    return false;
-  }
+    public void setRevisionIsSet(boolean value) {
+        __isset_bit_vector.set(__REVISION_ISSET_ID, value);
+    }
 
-  public boolean equals(StoreFamilyRevision that) {
-    if (that == null)
-      return false;
+    public long getTimestamp() {
+        return this.timestamp;
+    }
 
-    boolean this_present_revision = true;
-    boolean that_present_revision = true;
-    if (this_present_revision || that_present_revision) {
-      if (!(this_present_revision && that_present_revision))
-        return false;
-      if (this.revision != that.revision)
+    public StoreFamilyRevision setTimestamp(long timestamp) {
+        this.timestamp = timestamp;
+        setTimestampIsSet(true);
+        return this;
+    }
+
+    public void unsetTimestamp() {
+        __isset_bit_vector.clear(__TIMESTAMP_ISSET_ID);
+    }
+
+    /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
+    public boolean isSetTimestamp() {
+        return __isset_bit_vector.get(__TIMESTAMP_ISSET_ID);
+    }
+
+    public void setTimestampIsSet(boolean value) {
+        __isset_bit_vector.set(__TIMESTAMP_ISSET_ID, value);
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+        switch (field) {
+        case REVISION:
+            if (value == null) {
+                unsetRevision();
+            } else {
+                setRevision((Long) value);
+            }
+            break;
+
+        case TIMESTAMP:
+            if (value == null) {
+                unsetTimestamp();
+            } else {
+                setTimestamp((Long) value);
+            }
+            break;
+
+        }
+    }
+
+    public Object getFieldValue(_Fields field) {
+        switch (field) {
+        case REVISION:
+            return Long.valueOf(getRevision());
+
+        case TIMESTAMP:
+            return Long.valueOf(getTimestamp());
+
+        }
+        throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+        if (field == null) {
+            throw new IllegalArgumentException();
+        }
+
+        switch (field) {
+        case REVISION:
+            return isSetRevision();
+        case TIMESTAMP:
+            return isSetTimestamp();
+        }
+        throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+        if (that == null)
+            return false;
+        if (that instanceof StoreFamilyRevision)
+            return this.equals((StoreFamilyRevision) that);
         return false;
     }
 
-    boolean this_present_timestamp = true;
-    boolean that_present_timestamp = true;
-    if (this_present_timestamp || that_present_timestamp) {
-      if (!(this_present_timestamp && that_present_timestamp))
-        return false;
-      if (this.timestamp != that.timestamp)
-        return false;
+    public boolean equals(StoreFamilyRevision that) {
+        if (that == null)
+            return false;
+
+        boolean this_present_revision = true;
+        boolean that_present_revision = true;
+        if (this_present_revision || that_present_revision) {
+            if (!(this_present_revision && that_present_revision))
+                return false;
+            if (this.revision != that.revision)
+                return false;
+        }
+
+        boolean this_present_timestamp = true;
+        boolean that_present_timestamp = true;
+        if (this_present_timestamp || that_present_timestamp) {
+            if (!(this_present_timestamp && that_present_timestamp))
+                return false;
+            if (this.timestamp != that.timestamp)
+                return false;
+        }
+
+        return true;
     }
 
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    return 0;
-  }
-
-  public int compareTo(StoreFamilyRevision other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
+    @Override
+    public int hashCode() {
+        return 0;
     }
 
-    int lastComparison = 0;
-    StoreFamilyRevision typedOther = (StoreFamilyRevision)other;
+    public int compareTo(StoreFamilyRevision other) {
+        if (!getClass().equals(other.getClass())) {
+            return getClass().getName().compareTo(other.getClass().getName());
+        }
 
-    lastComparison = Boolean.valueOf(isSetRevision()).compareTo(typedOther.isSetRevision());
-    if (lastComparison != 0) {
-      return lastComparison;
+        int lastComparison = 0;
+        StoreFamilyRevision typedOther = (StoreFamilyRevision) other;
+
+        lastComparison = Boolean.valueOf(isSetRevision()).compareTo(typedOther.isSetRevision());
+        if (lastComparison != 0) {
+            return lastComparison;
+        }
+        if (isSetRevision()) {
+            lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.revision, typedOther.revision);
+            if (lastComparison != 0) {
+                return lastComparison;
+            }
+        }
+        lastComparison = Boolean.valueOf(isSetTimestamp()).compareTo(typedOther.isSetTimestamp());
+        if (lastComparison != 0) {
+            return lastComparison;
+        }
+        if (isSetTimestamp()) {
+            lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.timestamp, typedOther.timestamp);
+            if (lastComparison != 0) {
+                return lastComparison;
+            }
+        }
+        return 0;
     }
-    if (isSetRevision()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.revision, typedOther.revision);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
+
+    public _Fields fieldForId(int fieldId) {
+        return _Fields.findByThriftId(fieldId);
     }
-    lastComparison = Boolean.valueOf(isSetTimestamp()).compareTo(typedOther.isSetTimestamp());
-    if (lastComparison != 0) {
-      return lastComparison;
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField field;
+        iprot.readStructBegin();
+        while (true) {
+            field = iprot.readFieldBegin();
+            if (field.type == org.apache.thrift.protocol.TType.STOP) {
+                break;
+            }
+            switch (field.id) {
+            case 1: // REVISION
+                if (field.type == org.apache.thrift.protocol.TType.I64) {
+                    this.revision = iprot.readI64();
+                    setRevisionIsSet(true);
+                } else {
+                    org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+                }
+                break;
+            case 2: // TIMESTAMP
+                if (field.type == org.apache.thrift.protocol.TType.I64) {
+                    this.timestamp = iprot.readI64();
+                    setTimestampIsSet(true);
+                } else {
+                    org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+                }
+                break;
+            default:
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            }
+            iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+
+        // check for required fields of primitive type, which can't be checked in the validate method
+        validate();
     }
-    if (isSetTimestamp()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.timestamp, typedOther.timestamp);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+        validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        oprot.writeFieldBegin(REVISION_FIELD_DESC);
+        oprot.writeI64(this.revision);
+        oprot.writeFieldEnd();
+        oprot.writeFieldBegin(TIMESTAMP_FIELD_DESC);
+        oprot.writeI64(this.timestamp);
+        oprot.writeFieldEnd();
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
     }
-    return 0;
-  }
 
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder("StoreFamilyRevision(");
+        boolean first = true;
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    org.apache.thrift.protocol.TField field;
-    iprot.readStructBegin();
-    while (true)
-    {
-      field = iprot.readFieldBegin();
-      if (field.type == org.apache.thrift.protocol.TType.STOP) {
-        break;
-      }
-      switch (field.id) {
-        case 1: // REVISION
-          if (field.type == org.apache.thrift.protocol.TType.I64) {
-            this.revision = iprot.readI64();
-            setRevisionIsSet(true);
-          } else {
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        case 2: // TIMESTAMP
-          if (field.type == org.apache.thrift.protocol.TType.I64) {
-            this.timestamp = iprot.readI64();
-            setTimestampIsSet(true);
-          } else {
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        default:
-          org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-      }
-      iprot.readFieldEnd();
+        sb.append("revision:");
+        sb.append(this.revision);
+        first = false;
+        if (!first) sb.append(", ");
+        sb.append("timestamp:");
+        sb.append(this.timestamp);
+        first = false;
+        sb.append(")");
+        return sb.toString();
     }
-    iprot.readStructEnd();
 
-    // check for required fields of primitive type, which can't be checked in the validate method
-    validate();
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    validate();
-
-    oprot.writeStructBegin(STRUCT_DESC);
-    oprot.writeFieldBegin(REVISION_FIELD_DESC);
-    oprot.writeI64(this.revision);
-    oprot.writeFieldEnd();
-    oprot.writeFieldBegin(TIMESTAMP_FIELD_DESC);
-    oprot.writeI64(this.timestamp);
-    oprot.writeFieldEnd();
-    oprot.writeFieldStop();
-    oprot.writeStructEnd();
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("StoreFamilyRevision(");
-    boolean first = true;
-
-    sb.append("revision:");
-    sb.append(this.revision);
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("timestamp:");
-    sb.append(this.timestamp);
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
+    public void validate() throws org.apache.thrift.TException {
+        // check for required fields
     }
-  }
 
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bit_vector = new BitSet(1);
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+        try {
+            write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+        } catch (org.apache.thrift.TException te) {
+            throw new java.io.IOException(te);
+        }
     }
-  }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+        try {
+            // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+            __isset_bit_vector = new BitSet(1);
+            read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+        } catch (org.apache.thrift.TException te) {
+            throw new java.io.IOException(te);
+        }
+    }
 
 }
 
diff --git a/storage-handlers/hbase/src/gen-java/org/apache/hcatalog/hbase/snapshot/transaction/thrift/StoreFamilyRevisionList.java b/storage-handlers/hbase/src/gen-java/org/apache/hcatalog/hbase/snapshot/transaction/thrift/StoreFamilyRevisionList.java
index e3c1c89..0f661cb 100644
--- a/storage-handlers/hbase/src/gen-java/org/apache/hcatalog/hbase/snapshot/transaction/thrift/StoreFamilyRevisionList.java
+++ b/storage-handlers/hbase/src/gen-java/org/apache/hcatalog/hbase/snapshot/transaction/thrift/StoreFamilyRevisionList.java
@@ -36,337 +36,334 @@
 import java.util.Map;
 
 public class StoreFamilyRevisionList implements org.apache.thrift.TBase<StoreFamilyRevisionList, StoreFamilyRevisionList._Fields>, java.io.Serializable, Cloneable {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StoreFamilyRevisionList");
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StoreFamilyRevisionList");
 
-  private static final org.apache.thrift.protocol.TField REVISION_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("revisionList", org.apache.thrift.protocol.TType.LIST, (short)1);
+    private static final org.apache.thrift.protocol.TField REVISION_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("revisionList", org.apache.thrift.protocol.TType.LIST, (short) 1);
 
-  public List<StoreFamilyRevision> revisionList; // required
+    public List<StoreFamilyRevision> revisionList; // required
 
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    REVISION_LIST((short)1, "revisionList");
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+        REVISION_LIST((short) 1, "revisionList");
 
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+        private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+        static {
+            for (_Fields field : EnumSet.allOf(_Fields.class)) {
+                byName.put(field.getFieldName(), field);
+            }
+        }
+
+        /**
+         * Find the _Fields constant that matches fieldId, or null if its not found.
+         */
+        public static _Fields findByThriftId(int fieldId) {
+            switch (fieldId) {
+            case 1: // REVISION_LIST
+                return REVISION_LIST;
+            default:
+                return null;
+            }
+        }
+
+        /**
+         * Find the _Fields constant that matches fieldId, throwing an exception
+         * if it is not found.
+         */
+        public static _Fields findByThriftIdOrThrow(int fieldId) {
+            _Fields fields = findByThriftId(fieldId);
+            if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+            return fields;
+        }
+
+        /**
+         * Find the _Fields constant that matches name, or null if its not found.
+         */
+        public static _Fields findByName(String name) {
+            return byName.get(name);
+        }
+
+        private final short _thriftId;
+        private final String _fieldName;
+
+        _Fields(short thriftId, String fieldName) {
+            _thriftId = thriftId;
+            _fieldName = fieldName;
+        }
+
+        public short getThriftFieldId() {
+            return _thriftId;
+        }
+
+        public String getFieldName() {
+            return _fieldName;
+        }
+    }
+
+    // isset id assignments
+
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
 
     static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
+        Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+        tmpMap.put(_Fields.REVISION_LIST, new org.apache.thrift.meta_data.FieldMetaData("revisionList", org.apache.thrift.TFieldRequirementType.DEFAULT,
+            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, StoreFamilyRevision.class))));
+        metaDataMap = Collections.unmodifiableMap(tmpMap);
+        org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(StoreFamilyRevisionList.class, metaDataMap);
+    }
+
+    public StoreFamilyRevisionList() {
+    }
+
+    public StoreFamilyRevisionList(
+        List<StoreFamilyRevision> revisionList) {
+        this();
+        this.revisionList = revisionList;
     }
 
     /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
+     * Performs a deep copy on <i>other</i>.
      */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // REVISION_LIST
-          return REVISION_LIST;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.REVISION_LIST, new org.apache.thrift.meta_data.FieldMetaData("revisionList", org.apache.thrift.TFieldRequirementType.DEFAULT,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, StoreFamilyRevision.class))));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(StoreFamilyRevisionList.class, metaDataMap);
-  }
-
-  public StoreFamilyRevisionList() {
-  }
-
-  public StoreFamilyRevisionList(
-    List<StoreFamilyRevision> revisionList)
-  {
-    this();
-    this.revisionList = revisionList;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public StoreFamilyRevisionList(StoreFamilyRevisionList other) {
-    if (other.isSetRevisionList()) {
-      List<StoreFamilyRevision> __this__revisionList = new ArrayList<StoreFamilyRevision>();
-      for (StoreFamilyRevision other_element : other.revisionList) {
-        __this__revisionList.add(new StoreFamilyRevision(other_element));
-      }
-      this.revisionList = __this__revisionList;
-    }
-  }
-
-  public StoreFamilyRevisionList deepCopy() {
-    return new StoreFamilyRevisionList(this);
-  }
-
-  @Override
-  public void clear() {
-    this.revisionList = null;
-  }
-
-  public int getRevisionListSize() {
-    return (this.revisionList == null) ? 0 : this.revisionList.size();
-  }
-
-  public java.util.Iterator<StoreFamilyRevision> getRevisionListIterator() {
-    return (this.revisionList == null) ? null : this.revisionList.iterator();
-  }
-
-  public void addToRevisionList(StoreFamilyRevision elem) {
-    if (this.revisionList == null) {
-      this.revisionList = new ArrayList<StoreFamilyRevision>();
-    }
-    this.revisionList.add(elem);
-  }
-
-  public List<StoreFamilyRevision> getRevisionList() {
-    return this.revisionList;
-  }
-
-  public StoreFamilyRevisionList setRevisionList(List<StoreFamilyRevision> revisionList) {
-    this.revisionList = revisionList;
-    return this;
-  }
-
-  public void unsetRevisionList() {
-    this.revisionList = null;
-  }
-
-  /** Returns true if field revisionList is set (has been assigned a value) and false otherwise */
-  public boolean isSetRevisionList() {
-    return this.revisionList != null;
-  }
-
-  public void setRevisionListIsSet(boolean value) {
-    if (!value) {
-      this.revisionList = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case REVISION_LIST:
-      if (value == null) {
-        unsetRevisionList();
-      } else {
-        setRevisionList((List<StoreFamilyRevision>)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case REVISION_LIST:
-      return getRevisionList();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case REVISION_LIST:
-      return isSetRevisionList();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof StoreFamilyRevisionList)
-      return this.equals((StoreFamilyRevisionList)that);
-    return false;
-  }
-
-  public boolean equals(StoreFamilyRevisionList that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_revisionList = true && this.isSetRevisionList();
-    boolean that_present_revisionList = true && that.isSetRevisionList();
-    if (this_present_revisionList || that_present_revisionList) {
-      if (!(this_present_revisionList && that_present_revisionList))
-        return false;
-      if (!this.revisionList.equals(that.revisionList))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    return 0;
-  }
-
-  public int compareTo(StoreFamilyRevisionList other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-    StoreFamilyRevisionList typedOther = (StoreFamilyRevisionList)other;
-
-    lastComparison = Boolean.valueOf(isSetRevisionList()).compareTo(typedOther.isSetRevisionList());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetRevisionList()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.revisionList, typedOther.revisionList);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    org.apache.thrift.protocol.TField field;
-    iprot.readStructBegin();
-    while (true)
-    {
-      field = iprot.readFieldBegin();
-      if (field.type == org.apache.thrift.protocol.TType.STOP) {
-        break;
-      }
-      switch (field.id) {
-        case 1: // REVISION_LIST
-          if (field.type == org.apache.thrift.protocol.TType.LIST) {
-            {
-              org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
-              this.revisionList = new ArrayList<StoreFamilyRevision>(_list0.size);
-              for (int _i1 = 0; _i1 < _list0.size; ++_i1)
-              {
-                StoreFamilyRevision _elem2; // required
-                _elem2 = new StoreFamilyRevision();
-                _elem2.read(iprot);
-                this.revisionList.add(_elem2);
-              }
-              iprot.readListEnd();
+    public StoreFamilyRevisionList(StoreFamilyRevisionList other) {
+        if (other.isSetRevisionList()) {
+            List<StoreFamilyRevision> __this__revisionList = new ArrayList<StoreFamilyRevision>();
+            for (StoreFamilyRevision other_element : other.revisionList) {
+                __this__revisionList.add(new StoreFamilyRevision(other_element));
             }
-          } else {
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-          }
-          break;
-        default:
-          org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-      }
-      iprot.readFieldEnd();
-    }
-    iprot.readStructEnd();
-
-    // check for required fields of primitive type, which can't be checked in the validate method
-    validate();
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    validate();
-
-    oprot.writeStructBegin(STRUCT_DESC);
-    if (this.revisionList != null) {
-      oprot.writeFieldBegin(REVISION_LIST_FIELD_DESC);
-      {
-        oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.revisionList.size()));
-        for (StoreFamilyRevision _iter3 : this.revisionList)
-        {
-          _iter3.write(oprot);
+            this.revisionList = __this__revisionList;
         }
-        oprot.writeListEnd();
-      }
-      oprot.writeFieldEnd();
     }
-    oprot.writeFieldStop();
-    oprot.writeStructEnd();
-  }
 
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("StoreFamilyRevisionList(");
-    boolean first = true;
-
-    sb.append("revisionList:");
-    if (this.revisionList == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.revisionList);
+    public StoreFamilyRevisionList deepCopy() {
+        return new StoreFamilyRevisionList(this);
     }
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
 
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
+    @Override
+    public void clear() {
+        this.revisionList = null;
     }
-  }
 
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
+    public int getRevisionListSize() {
+        return (this.revisionList == null) ? 0 : this.revisionList.size();
     }
-  }
+
+    public java.util.Iterator<StoreFamilyRevision> getRevisionListIterator() {
+        return (this.revisionList == null) ? null : this.revisionList.iterator();
+    }
+
+    public void addToRevisionList(StoreFamilyRevision elem) {
+        if (this.revisionList == null) {
+            this.revisionList = new ArrayList<StoreFamilyRevision>();
+        }
+        this.revisionList.add(elem);
+    }
+
+    public List<StoreFamilyRevision> getRevisionList() {
+        return this.revisionList;
+    }
+
+    public StoreFamilyRevisionList setRevisionList(List<StoreFamilyRevision> revisionList) {
+        this.revisionList = revisionList;
+        return this;
+    }
+
+    public void unsetRevisionList() {
+        this.revisionList = null;
+    }
+
+    /** Returns true if field revisionList is set (has been assigned a value) and false otherwise */
+    public boolean isSetRevisionList() {
+        return this.revisionList != null;
+    }
+
+    public void setRevisionListIsSet(boolean value) {
+        if (!value) {
+            this.revisionList = null;
+        }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+        switch (field) {
+        case REVISION_LIST:
+            if (value == null) {
+                unsetRevisionList();
+            } else {
+                setRevisionList((List<StoreFamilyRevision>) value);
+            }
+            break;
+
+        }
+    }
+
+    public Object getFieldValue(_Fields field) {
+        switch (field) {
+        case REVISION_LIST:
+            return getRevisionList();
+
+        }
+        throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+        if (field == null) {
+            throw new IllegalArgumentException();
+        }
+
+        switch (field) {
+        case REVISION_LIST:
+            return isSetRevisionList();
+        }
+        throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+        if (that == null)
+            return false;
+        if (that instanceof StoreFamilyRevisionList)
+            return this.equals((StoreFamilyRevisionList) that);
+        return false;
+    }
+
+    public boolean equals(StoreFamilyRevisionList that) {
+        if (that == null)
+            return false;
+
+        boolean this_present_revisionList = true && this.isSetRevisionList();
+        boolean that_present_revisionList = true && that.isSetRevisionList();
+        if (this_present_revisionList || that_present_revisionList) {
+            if (!(this_present_revisionList && that_present_revisionList))
+                return false;
+            if (!this.revisionList.equals(that.revisionList))
+                return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return 0;
+    }
+
+    public int compareTo(StoreFamilyRevisionList other) {
+        if (!getClass().equals(other.getClass())) {
+            return getClass().getName().compareTo(other.getClass().getName());
+        }
+
+        int lastComparison = 0;
+        StoreFamilyRevisionList typedOther = (StoreFamilyRevisionList) other;
+
+        lastComparison = Boolean.valueOf(isSetRevisionList()).compareTo(typedOther.isSetRevisionList());
+        if (lastComparison != 0) {
+            return lastComparison;
+        }
+        if (isSetRevisionList()) {
+            lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.revisionList, typedOther.revisionList);
+            if (lastComparison != 0) {
+                return lastComparison;
+            }
+        }
+        return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+        return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField field;
+        iprot.readStructBegin();
+        while (true) {
+            field = iprot.readFieldBegin();
+            if (field.type == org.apache.thrift.protocol.TType.STOP) {
+                break;
+            }
+            switch (field.id) {
+            case 1: // REVISION_LIST
+                if (field.type == org.apache.thrift.protocol.TType.LIST) {
+                    {
+                        org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
+                        this.revisionList = new ArrayList<StoreFamilyRevision>(_list0.size);
+                        for (int _i1 = 0; _i1 < _list0.size; ++_i1) {
+                            StoreFamilyRevision _elem2; // required
+                            _elem2 = new StoreFamilyRevision();
+                            _elem2.read(iprot);
+                            this.revisionList.add(_elem2);
+                        }
+                        iprot.readListEnd();
+                    }
+                } else {
+                    org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+                }
+                break;
+            default:
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            }
+            iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+
+        // check for required fields of primitive type, which can't be checked in the validate method
+        validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+        validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (this.revisionList != null) {
+            oprot.writeFieldBegin(REVISION_LIST_FIELD_DESC);
+            {
+                oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.revisionList.size()));
+                for (StoreFamilyRevision _iter3 : this.revisionList) {
+                    _iter3.write(oprot);
+                }
+                oprot.writeListEnd();
+            }
+            oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder("StoreFamilyRevisionList(");
+        boolean first = true;
+
+        sb.append("revisionList:");
+        if (this.revisionList == null) {
+            sb.append("null");
+        } else {
+            sb.append(this.revisionList);
+        }
+        first = false;
+        sb.append(")");
+        return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+        // check for required fields
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+        try {
+            write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+        } catch (org.apache.thrift.TException te) {
+            throw new java.io.IOException(te);
+        }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+        try {
+            read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+        } catch (org.apache.thrift.TException te) {
+            throw new java.io.IOException(te);
+        }
+    }
 
 }
 
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseAuthorizationProvider.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseAuthorizationProvider.java
index 39e0025..77abc0d 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseAuthorizationProvider.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseAuthorizationProvider.java
@@ -35,109 +35,109 @@
  * authorization functionality for HBase tables.
  */
 class HBaseAuthorizationProvider implements HiveAuthorizationProvider {
-    
+
     @Override
     public Configuration getConf() {
         return null;
     }
-    
+
     @Override
     public void setConf(Configuration conf) {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #init(org.apache.hadoop.conf.Configuration)
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #init(org.apache.hadoop.conf.Configuration)
+    */
     @Override
     public void init(Configuration conf) throws HiveException {
     }
-    
+
     @Override
     public HiveAuthenticationProvider getAuthenticator() {
         return null;
     }
-    
+
     @Override
     public void setAuthenticator(HiveAuthenticationProvider authenticator) {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.metastore.api.Database,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.metastore.api.Database,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Database db, Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Table table, Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.metadata.Partition,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Partition,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Partition part, Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
-     * org.apache.hadoop.hive.ql.metadata.Partition, java.util.List,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
+    * org.apache.hadoop.hive.ql.metadata.Partition, java.util.List,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Table table, Partition part, List<String> columns,
-            Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-            throws HiveException, AuthorizationException {
+                          Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
     }
-    
+
 }
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBaseOutputFormat.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBaseOutputFormat.java
index 95a30ec..5805b84 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBaseOutputFormat.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBaseOutputFormat.java
@@ -36,14 +36,14 @@
 import org.apache.hcatalog.mapreduce.OutputJobInfo;
 
 public class HBaseBaseOutputFormat implements OutputFormat<WritableComparable<?>, Put>,
-        HiveOutputFormat<WritableComparable<?>, Put> {
+    HiveOutputFormat<WritableComparable<?>, Put> {
 
     @Override
     public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(
-            JobConf jc, Path finalOutPath,
-            Class<? extends Writable> valueClass, boolean isCompressed,
-            Properties tableProperties, Progressable progress)
-            throws IOException {
+        JobConf jc, Path finalOutPath,
+        Class<? extends Writable> valueClass, boolean isCompressed,
+        Properties tableProperties, Progressable progress)
+        throws IOException {
         throw new UnsupportedOperationException("Not implemented");
     }
 
@@ -55,13 +55,13 @@
 
     @Override
     public RecordWriter<WritableComparable<?>, Put> getRecordWriter(FileSystem ignored,
-            JobConf job, String name, Progressable progress) throws IOException {
+                                                                    JobConf job, String name, Progressable progress) throws IOException {
         OutputFormat<WritableComparable<?>, Put> outputFormat = getOutputFormat(job);
         return outputFormat.getRecordWriter(ignored, job, name, progress);
     }
 
     private OutputFormat<WritableComparable<?>, Put> getOutputFormat(JobConf job)
-            throws IOException {
+        throws IOException {
         String outputInfo = job.get(HCatConstants.HCAT_KEY_OUTPUT_INFO);
         OutputJobInfo outputJobInfo = (OutputJobInfo) HCatUtil.deserialize(outputInfo);
         OutputFormat<WritableComparable<?>, Put> outputFormat = null;
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBulkOutputFormat.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBulkOutputFormat.java
index b9aba35..93d09bc 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBulkOutputFormat.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBulkOutputFormat.java
@@ -51,7 +51,7 @@
 class HBaseBulkOutputFormat extends HBaseBaseOutputFormat {
 
     private final static ImmutableBytesWritable EMPTY_LIST = new ImmutableBytesWritable(
-            new byte[0]);
+        new byte[0]);
     private SequenceFileOutputFormat<WritableComparable<?>, Put> baseOutputFormat;
 
     public HBaseBulkOutputFormat() {
@@ -60,7 +60,7 @@
 
     @Override
     public void checkOutputSpecs(FileSystem ignored, JobConf job)
-            throws IOException {
+        throws IOException {
         baseOutputFormat.checkOutputSpecs(ignored, job);
         HBaseUtil.addHBaseDelegationToken(job);
         addJTDelegationToken(job);
@@ -68,13 +68,13 @@
 
     @Override
     public RecordWriter<WritableComparable<?>, Put> getRecordWriter(
-            FileSystem ignored, JobConf job, String name, Progressable progress)
-            throws IOException {
+        FileSystem ignored, JobConf job, String name, Progressable progress)
+        throws IOException {
         job.setOutputKeyClass(ImmutableBytesWritable.class);
         job.setOutputValueClass(Put.class);
         long version = HBaseRevisionManagerUtil.getOutputRevision(job);
         return new HBaseBulkRecordWriter(baseOutputFormat.getRecordWriter(
-                ignored, job, name, progress), version);
+            ignored, job, name, progress), version);
     }
 
     private void addJTDelegationToken(JobConf job) throws IOException {
@@ -84,7 +84,7 @@
             JobClient jobClient = new JobClient(new JobConf(job));
             try {
                 job.getCredentials().addToken(new Text("my mr token"),
-                        jobClient.getDelegationToken(null));
+                    jobClient.getDelegationToken(null));
             } catch (InterruptedException e) {
                 throw new IOException("Error while getting JT delegation token", e);
             }
@@ -92,21 +92,21 @@
     }
 
     private static class HBaseBulkRecordWriter implements
-            RecordWriter<WritableComparable<?>, Put> {
+        RecordWriter<WritableComparable<?>, Put> {
 
         private RecordWriter<WritableComparable<?>, Put> baseWriter;
         private final Long outputVersion;
 
         public HBaseBulkRecordWriter(
-                RecordWriter<WritableComparable<?>, Put> baseWriter,
-                Long outputVersion) {
+            RecordWriter<WritableComparable<?>, Put> baseWriter,
+            Long outputVersion) {
             this.baseWriter = baseWriter;
             this.outputVersion = outputVersion;
         }
 
         @Override
         public void write(WritableComparable<?> key, Put value)
-                throws IOException {
+            throws IOException {
             Put put = value;
             if (outputVersion != null) {
                 put = new Put(value.getRow(), outputVersion.longValue());
@@ -136,19 +136,19 @@
 
         @Override
         public void abortTask(TaskAttemptContext taskContext)
-                throws IOException {
+            throws IOException {
             baseOutputCommitter.abortTask(taskContext);
         }
 
         @Override
         public void commitTask(TaskAttemptContext taskContext)
-                throws IOException {
+            throws IOException {
             baseOutputCommitter.commitTask(taskContext);
         }
 
         @Override
         public boolean needsTaskCommit(TaskAttemptContext taskContext)
-                throws IOException {
+            throws IOException {
             return baseOutputCommitter.needsTaskCommit(taskContext);
         }
 
@@ -159,20 +159,20 @@
 
         @Override
         public void setupTask(TaskAttemptContext taskContext)
-                throws IOException {
+            throws IOException {
             baseOutputCommitter.setupTask(taskContext);
         }
 
         @Override
         public void abortJob(JobContext jobContext, int status)
-                throws IOException {
+            throws IOException {
             baseOutputCommitter.abortJob(jobContext, status);
             RevisionManager rm = null;
             try {
                 rm = HBaseRevisionManagerUtil
-                        .getOpenedRevisionManager(jobContext.getConfiguration());
+                    .getOpenedRevisionManager(jobContext.getConfiguration());
                 rm.abortWriteTransaction(HBaseRevisionManagerUtil
-                        .getWriteTransaction(jobContext.getConfiguration()));
+                    .getWriteTransaction(jobContext.getConfiguration()));
             } finally {
                 cleanIntermediate(jobContext);
                 if (rm != null)
@@ -189,18 +189,18 @@
                 Path srcPath = FileOutputFormat.getOutputPath(jobContext.getJobConf());
                 if (!FileSystem.get(conf).exists(srcPath)) {
                     throw new IOException("Failed to bulk import hfiles. " +
-                    		"Intermediate data directory is cleaned up or missing. " +
-                    		"Please look at the bulk import job if it exists for failure reason");
+                        "Intermediate data directory is cleaned up or missing. " +
+                        "Please look at the bulk import job if it exists for failure reason");
                 }
                 Path destPath = new Path(srcPath.getParent(), srcPath.getName() + "_hfiles");
                 boolean success = ImportSequenceFile.runJob(jobContext,
-                                conf.get(HBaseConstants.PROPERTY_OUTPUT_TABLE_NAME_KEY),
-                                srcPath,
-                                destPath);
-                if(!success) {
+                    conf.get(HBaseConstants.PROPERTY_OUTPUT_TABLE_NAME_KEY),
+                    srcPath,
+                    destPath);
+                if (!success) {
                     cleanIntermediate(jobContext);
                     throw new IOException("Failed to bulk import hfiles." +
-                    		" Please look at the bulk import job for failure reason");
+                        " Please look at the bulk import job for failure reason");
                 }
                 rm = HBaseRevisionManagerUtil.getOpenedRevisionManager(conf);
                 rm.commitWriteTransaction(HBaseRevisionManagerUtil.getWriteTransaction(conf));
@@ -212,7 +212,7 @@
         }
 
         private void cleanIntermediate(JobContext jobContext)
-                throws IOException {
+            throws IOException {
             FileSystem fs = FileSystem.get(jobContext.getConfiguration());
             fs.delete(FileOutputFormat.getOutputPath(jobContext.getJobConf()), true);
         }
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseConstants.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseConstants.java
index de784a6..80e53ac 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseConstants.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseConstants.java
@@ -26,13 +26,13 @@
 class HBaseConstants {
 
     /** key used to store write transaction object */
-    public static final String PROPERTY_WRITE_TXN_KEY = HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX+".hbase.mapreduce.writeTxn";
+    public static final String PROPERTY_WRITE_TXN_KEY = HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".hbase.mapreduce.writeTxn";
 
     /** key used to define the name of the table to write to */
-    public static final String PROPERTY_OUTPUT_TABLE_NAME_KEY = HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX+".hbase.mapreduce.outputTableName";
+    public static final String PROPERTY_OUTPUT_TABLE_NAME_KEY = HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".hbase.mapreduce.outputTableName";
 
     /** key used to define whether bulk storage output format will be used or not  */
-    public static final String PROPERTY_BULK_OUTPUT_MODE_KEY = HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX+".hbase.output.bulkMode";
+    public static final String PROPERTY_BULK_OUTPUT_MODE_KEY = HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".hbase.output.bulkMode";
 
     /** key used to define the hbase table snapshot. */
     public static final String PROPERTY_TABLE_SNAPSHOT_KEY = HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + "hbase.table.snapshot";
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseDirectOutputFormat.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseDirectOutputFormat.java
index 0001dd2..c4d819a 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseDirectOutputFormat.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseDirectOutputFormat.java
@@ -53,36 +53,36 @@
 
     @Override
     public RecordWriter<WritableComparable<?>, Put> getRecordWriter(FileSystem ignored,
-            JobConf job, String name, Progressable progress)
-            throws IOException {
+                                                                    JobConf job, String name, Progressable progress)
+        throws IOException {
         long version = HBaseRevisionManagerUtil.getOutputRevision(job);
         return new HBaseDirectRecordWriter(outputFormat.getRecordWriter(ignored, job, name,
-                progress), version);
+            progress), version);
     }
 
     @Override
     public void checkOutputSpecs(FileSystem ignored, JobConf job)
-            throws IOException {
+        throws IOException {
         outputFormat.checkOutputSpecs(ignored, job);
         HBaseUtil.addHBaseDelegationToken(job);
     }
 
     private static class HBaseDirectRecordWriter implements
-            RecordWriter<WritableComparable<?>, Put> {
+        RecordWriter<WritableComparable<?>, Put> {
 
         private RecordWriter<WritableComparable<?>, Put> baseWriter;
         private final Long outputVersion;
 
         public HBaseDirectRecordWriter(
-                RecordWriter<WritableComparable<?>, Put> baseWriter,
-                Long outputVersion) {
+            RecordWriter<WritableComparable<?>, Put> baseWriter,
+            Long outputVersion) {
             this.baseWriter = baseWriter;
             this.outputVersion = outputVersion;
         }
 
         @Override
         public void write(WritableComparable<?> key, Put value)
-                throws IOException {
+            throws IOException {
             Put put = value;
             if (outputVersion != null) {
                 put = new Put(value.getRow(), outputVersion.longValue());
@@ -109,17 +109,17 @@
 
         @Override
         public void abortTask(TaskAttemptContext taskContext)
-                throws IOException {
+            throws IOException {
         }
 
         @Override
         public void commitTask(TaskAttemptContext taskContext)
-                throws IOException {
+            throws IOException {
         }
 
         @Override
         public boolean needsTaskCommit(TaskAttemptContext taskContext)
-                throws IOException {
+            throws IOException {
             return false;
         }
 
@@ -129,19 +129,19 @@
 
         @Override
         public void setupTask(TaskAttemptContext taskContext)
-                throws IOException {
+            throws IOException {
         }
 
         @Override
         public void abortJob(JobContext jobContext, int status)
-                throws IOException {
+            throws IOException {
             super.abortJob(jobContext, status);
             RevisionManager rm = null;
             try {
                 rm = HBaseRevisionManagerUtil
-                        .getOpenedRevisionManager(jobContext.getConfiguration());
+                    .getOpenedRevisionManager(jobContext.getConfiguration());
                 Transaction writeTransaction = HBaseRevisionManagerUtil
-                        .getWriteTransaction(jobContext.getConfiguration());
+                    .getWriteTransaction(jobContext.getConfiguration());
                 rm.abortWriteTransaction(writeTransaction);
             } finally {
                 if (rm != null)
@@ -154,9 +154,9 @@
             RevisionManager rm = null;
             try {
                 rm = HBaseRevisionManagerUtil
-                        .getOpenedRevisionManager(jobContext.getConfiguration());
+                    .getOpenedRevisionManager(jobContext.getConfiguration());
                 rm.commitWriteTransaction(HBaseRevisionManagerUtil.getWriteTransaction(jobContext
-                        .getConfiguration()));
+                    .getConfiguration()));
             } finally {
                 if (rm != null)
                     rm.close();
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseHCatStorageHandler.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseHCatStorageHandler.java
index b0441db..42cd23c 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseHCatStorageHandler.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseHCatStorageHandler.java
@@ -106,21 +106,21 @@
             //do it here
             if (jobConf instanceof JobConf) { //Should be the case
                 HBaseUtil.addHBaseDelegationToken(copyOfConf);
-                ((JobConf)jobConf).getCredentials().addAll(copyOfConf.getCredentials());
+                ((JobConf) jobConf).getCredentials().addAll(copyOfConf.getCredentials());
             }
 
             String outputSchema = jobConf.get(HCatConstants.HCAT_KEY_OUTPUT_SCHEMA);
             jobProperties.put(TableInputFormat.SCAN_COLUMNS, getScanColumns(tableInfo, outputSchema));
 
             String serSnapshot = (String) inputJobInfo.getProperties().get(
-                    HBaseConstants.PROPERTY_TABLE_SNAPSHOT_KEY);
+                HBaseConstants.PROPERTY_TABLE_SNAPSHOT_KEY);
             if (serSnapshot == null) {
                 HCatTableSnapshot snapshot =
-                        HBaseRevisionManagerUtil.createSnapshot(
-                            RevisionManagerConfiguration.create(copyOfConf),
-                            qualifiedTableName, tableInfo);
+                    HBaseRevisionManagerUtil.createSnapshot(
+                        RevisionManagerConfiguration.create(copyOfConf),
+                        qualifiedTableName, tableInfo);
                 jobProperties.put(HBaseConstants.PROPERTY_TABLE_SNAPSHOT_KEY,
-                        HCatUtil.serialize(snapshot));
+                    HCatUtil.serialize(snapshot));
             }
 
             //This adds it directly to the jobConf. Setting in jobProperties does not get propagated
@@ -155,21 +155,21 @@
             HBaseConfiguration.addHbaseResources(copyOfConf);
 
             String txnString = outputJobInfo.getProperties().getProperty(
-                    HBaseConstants.PROPERTY_WRITE_TXN_KEY);
+                HBaseConstants.PROPERTY_WRITE_TXN_KEY);
             Transaction txn = null;
             if (txnString == null) {
                 txn = HBaseRevisionManagerUtil.beginWriteTransaction(qualifiedTableName, tableInfo,
-                        RevisionManagerConfiguration.create(copyOfConf));
+                    RevisionManagerConfiguration.create(copyOfConf));
                 String serializedTxn = HCatUtil.serialize(txn);
                 outputJobInfo.getProperties().setProperty(HBaseConstants.PROPERTY_WRITE_TXN_KEY,
-                        serializedTxn);
+                    serializedTxn);
             } else {
                 txn = (Transaction) HCatUtil.deserialize(txnString);
             }
             if (isBulkMode(outputJobInfo)) {
                 String tableLocation = tableInfo.getTableLocation();
                 String location = new Path(tableLocation, "REVISION_" + txn.getRevisionNumber())
-                        .toString();
+                    .toString();
                 outputJobInfo.getProperties().setProperty(PROPERTY_INT_OUTPUT_LOCATION, location);
                 // We are writing out an intermediate sequenceFile hence
                 // location is not passed in OutputJobInfo.getLocation()
@@ -199,7 +199,7 @@
     */
     @Override
     public HiveAuthorizationProvider getAuthorizationProvider()
-            throws HiveException {
+        throws HiveException {
 
         HBaseAuthorizationProvider hbaseAuth = new HBaseAuthorizationProvider();
         hbaseAuth.init(getConf());
@@ -230,7 +230,7 @@
      */
     @Override
     public void commitDropTable(Table tbl, boolean deleteData)
-            throws MetaException {
+        throws MetaException {
         checkDeleteTable(tbl);
 
     }
@@ -256,20 +256,20 @@
         try {
             String tableName = getFullyQualifiedHBaseTableName(tbl);
             String hbaseColumnsMapping = tbl.getParameters().get(
-                    HBaseSerDe.HBASE_COLUMNS_MAPPING);
+                HBaseSerDe.HBASE_COLUMNS_MAPPING);
 
             if (hbaseColumnsMapping == null) {
                 throw new MetaException(
-                        "No hbase.columns.mapping defined in table"
-                                + " properties.");
+                    "No hbase.columns.mapping defined in table"
+                        + " properties.");
             }
 
             List<String> hbaseColumnFamilies = new ArrayList<String>();
             List<String> hbaseColumnQualifiers = new ArrayList<String>();
             List<byte[]> hbaseColumnFamiliesBytes = new ArrayList<byte[]>();
             int iKey = HBaseUtil.parseColumnMapping(hbaseColumnsMapping,
-                    hbaseColumnFamilies, hbaseColumnFamiliesBytes,
-                    hbaseColumnQualifiers, null);
+                hbaseColumnFamilies, hbaseColumnFamiliesBytes,
+                hbaseColumnQualifiers, null);
 
             HTableDescriptor tableDesc;
             Set<String> uniqueColumnFamilies = new HashSet<String>();
@@ -283,7 +283,7 @@
 
                     for (String columnFamily : uniqueColumnFamilies) {
                         HColumnDescriptor familyDesc = new HColumnDescriptor(Bytes
-                                .toBytes(columnFamily));
+                            .toBytes(columnFamily));
                         familyDesc.setMaxVersions(Integer.MAX_VALUE);
                         tableDesc.addFamily(familyDesc);
                     }
@@ -292,20 +292,20 @@
                 } else {
                     // an external table
                     throw new MetaException("HBase table " + tableName
-                            + " doesn't exist while the table is "
-                            + "declared as an external table.");
+                        + " doesn't exist while the table is "
+                        + "declared as an external table.");
                 }
 
             } else {
                 if (!isExternal) {
                     throw new MetaException("Table " + tableName
-                            + " already exists within HBase."
-                            + " Use CREATE EXTERNAL TABLE instead to"
-                            + " register it in HCatalog.");
+                        + " already exists within HBase."
+                        + " Use CREATE EXTERNAL TABLE instead to"
+                        + " register it in HCatalog.");
                 }
                 // make sure the schema mapping is right
                 tableDesc = getHBaseAdmin().getTableDescriptor(
-                        Bytes.toBytes(tableName));
+                    Bytes.toBytes(tableName));
 
                 for (int i = 0; i < hbaseColumnFamilies.size(); i++) {
                     if (i == iKey) {
@@ -314,8 +314,8 @@
 
                     if (!tableDesc.hasFamily(hbaseColumnFamiliesBytes.get(i))) {
                         throw new MetaException("Column Family "
-                                + hbaseColumnFamilies.get(i)
-                                + " is not defined in hbase table " + tableName);
+                            + hbaseColumnFamilies.get(i)
+                            + " is not defined in hbase table " + tableName);
                     }
                 }
             }
@@ -401,7 +401,7 @@
         String tableName = tbl.getParameters().get(HBaseSerDe.HBASE_TABLE_NAME);
         if (tableName == null) {
             tableName = tbl.getSd().getSerdeInfo().getParameters()
-                    .get(HBaseSerDe.HBASE_TABLE_NAME);
+                .get(HBaseSerDe.HBASE_TABLE_NAME);
         }
         if (tableName == null) {
             if (tbl.getDbName().equals(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
@@ -414,14 +414,14 @@
         return tableName;
     }
 
-    static String getFullyQualifiedHBaseTableName(HCatTableInfo tableInfo){
+    static String getFullyQualifiedHBaseTableName(HCatTableInfo tableInfo) {
         String qualifiedName = tableInfo.getStorerInfo().getProperties()
-                .getProperty(HBaseSerDe.HBASE_TABLE_NAME);
+            .getProperty(HBaseSerDe.HBASE_TABLE_NAME);
         if (qualifiedName == null) {
             String databaseName = tableInfo.getDatabaseName();
             String tableName = tableInfo.getTableName();
             if ((databaseName == null)
-                    || (databaseName.equals(MetaStoreUtils.DEFAULT_DATABASE_NAME))) {
+                || (databaseName.equals(MetaStoreUtils.DEFAULT_DATABASE_NAME))) {
                 qualifiedName = tableName;
             } else {
                 qualifiedName = databaseName + "." + tableName;
@@ -451,7 +451,7 @@
     */
     @Override
     public Class<? extends SerDe> getSerDeClass()
-            throws UnsupportedOperationException {
+        throws UnsupportedOperationException {
         return HBaseSerDe.class;
     }
 
@@ -514,28 +514,28 @@
      */
     private void addOutputDependencyJars(Configuration conf) throws IOException {
         TableMapReduceUtil.addDependencyJars(conf,
-                //ZK
-                ZooKeeper.class,
-                //HBase
-                HTable.class,
-                //Hive
-                HiveException.class,
-                //HCatalog jar
-                HCatOutputFormat.class,
-                //hcat hbase storage handler jar
-                HBaseHCatStorageHandler.class,
-                //hive hbase storage handler jar
-                HBaseSerDe.class,
-                //hive jar
-                Table.class,
-                //libthrift jar
-                TBase.class,
-                //hbase jar
-                Bytes.class,
-                //thrift-fb303 .jar
-                FacebookBase.class,
-                //guava jar
-                ThreadFactoryBuilder.class);
+            //ZK
+            ZooKeeper.class,
+            //HBase
+            HTable.class,
+            //Hive
+            HiveException.class,
+            //HCatalog jar
+            HCatOutputFormat.class,
+            //hcat hbase storage handler jar
+            HBaseHCatStorageHandler.class,
+            //hive hbase storage handler jar
+            HBaseSerDe.class,
+            //hive jar
+            Table.class,
+            //libthrift jar
+            TBase.class,
+            //hbase jar
+            Bytes.class,
+            //thrift-fb303 .jar
+            FacebookBase.class,
+            //guava jar
+            ThreadFactoryBuilder.class);
     }
 
     /**
@@ -558,15 +558,15 @@
     public static boolean isBulkMode(OutputJobInfo outputJobInfo) {
         //Default is false
         String bulkMode = outputJobInfo.getTableInfo().getStorerInfo().getProperties()
-                .getProperty(HBaseConstants.PROPERTY_BULK_OUTPUT_MODE_KEY,
-                        "false");
+            .getProperty(HBaseConstants.PROPERTY_BULK_OUTPUT_MODE_KEY,
+                "false");
         return "true".equals(bulkMode);
     }
 
     private String getScanColumns(HCatTableInfo tableInfo, String outputColSchema) throws IOException {
         StringBuilder builder = new StringBuilder();
         String hbaseColumnMapping = tableInfo.getStorerInfo().getProperties()
-                .getProperty(HBaseSerDe.HBASE_COLUMNS_MAPPING);
+            .getProperty(HBaseSerDe.HBASE_COLUMNS_MAPPING);
         if (outputColSchema == null) {
             String[] splits = hbaseColumnMapping.split("[,]");
             for (int i = 0; i < splits.length; i++) {
@@ -578,14 +578,14 @@
             HCatSchema tableSchema = tableInfo.getDataColumns();
             List<String> outputFieldNames = outputSchema.getFieldNames();
             List<Integer> outputColumnMapping = new ArrayList<Integer>();
-            for(String fieldName: outputFieldNames){
+            for (String fieldName : outputFieldNames) {
                 int position = tableSchema.getPosition(fieldName);
                 outputColumnMapping.add(position);
             }
             List<String> columnFamilies = new ArrayList<String>();
             List<String> columnQualifiers = new ArrayList<String>();
             HBaseUtil.parseColumnMapping(hbaseColumnMapping, columnFamilies, null,
-                    columnQualifiers, null);
+                columnQualifiers, null);
             for (int i = 0; i < outputColumnMapping.size(); i++) {
                 int cfIndex = outputColumnMapping.get(i);
                 String cf = columnFamilies.get(cfIndex);
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseInputFormat.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseInputFormat.java
index 49d2cd3..df1bb45 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseInputFormat.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseInputFormat.java
@@ -66,8 +66,8 @@
      */
     @Override
     public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
-            InputSplit split, JobConf job, Reporter reporter)
-            throws IOException {
+        InputSplit split, JobConf job, Reporter reporter)
+        throws IOException {
         String jobString = job.get(HCatConstants.HCAT_KEY_JOB_INFO);
         InputJobInfo inputJobInfo = (InputJobInfo) HCatUtil.deserialize(jobString);
 
@@ -103,20 +103,20 @@
      */
     @Override
     public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf job, int numSplits)
-            throws IOException {
+        throws IOException {
         inputFormat.setConf(job);
         return convertSplits(inputFormat.getSplits(HCatMapRedUtil.createJobContext(job, null,
-                Reporter.NULL)));
+            Reporter.NULL)));
     }
 
     private InputSplit[] convertSplits(List<org.apache.hadoop.mapreduce.InputSplit> splits) {
         InputSplit[] converted = new InputSplit[splits.size()];
         for (int i = 0; i < splits.size(); i++) {
             org.apache.hadoop.hbase.mapreduce.TableSplit tableSplit =
-                    (org.apache.hadoop.hbase.mapreduce.TableSplit) splits.get(i);
+                (org.apache.hadoop.hbase.mapreduce.TableSplit) splits.get(i);
             TableSplit newTableSplit = new TableSplit(tableSplit.getTableName(),
-                    tableSplit.getStartRow(),
-                    tableSplit.getEndRow(), tableSplit.getRegionLocation());
+                tableSplit.getStartRow(),
+                tableSplit.getEndRow(), tableSplit.getRegionLocation());
             converted[i] = newTableSplit;
         }
         return converted;
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseRevisionManagerUtil.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseRevisionManagerUtil.java
index b619d52..8d57d1b 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseRevisionManagerUtil.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseRevisionManagerUtil.java
@@ -65,7 +65,7 @@
      * @throws IOException Signals that an I/O exception has occurred.
      */
     static HCatTableSnapshot createSnapshot(Configuration jobConf,
-            String hbaseTableName, HCatTableInfo tableInfo ) throws IOException {
+                                            String hbaseTableName, HCatTableInfo tableInfo) throws IOException {
 
         RevisionManager rm = null;
         TableSnapshot snpt;
@@ -90,8 +90,8 @@
      * @throws IOException Signals that an I/O exception has occurred.
      */
     static HCatTableSnapshot createSnapshot(Configuration jobConf,
-            String tableName, long revision)
-            throws IOException {
+                                            String tableName, long revision)
+        throws IOException {
 
         TableSnapshot snpt;
         RevisionManager rm = null;
@@ -103,14 +103,14 @@
         }
 
         String inputJobString = jobConf.get(HCatConstants.HCAT_KEY_JOB_INFO);
-        if(inputJobString == null){
+        if (inputJobString == null) {
             throw new IOException(
-                    "InputJobInfo information not found in JobContext. "
-                            + "HCatInputFormat.setInput() not called?");
+                "InputJobInfo information not found in JobContext. "
+                    + "HCatInputFormat.setInput() not called?");
         }
         InputJobInfo inputInfo = (InputJobInfo) HCatUtil.deserialize(inputJobString);
         HCatTableSnapshot hcatSnapshot = HBaseRevisionManagerUtil
-                .convertSnapshot(snpt, inputInfo.getTableInfo());
+            .convertSnapshot(snpt, inputInfo.getTableInfo());
 
         return hcatSnapshot;
     }
@@ -123,7 +123,7 @@
      * @throws IOException
      */
     static RevisionManager getOpenedRevisionManager(Configuration jobConf) throws IOException {
-      return RevisionManagerFactory.getOpenedRevisionManager(jobConf);
+        return RevisionManagerFactory.getOpenedRevisionManager(jobConf);
     }
 
     static void closeRevisionManagerQuietly(RevisionManager rm) {
@@ -138,14 +138,14 @@
 
 
     static HCatTableSnapshot convertSnapshot(TableSnapshot hbaseSnapshot,
-            HCatTableInfo hcatTableInfo) throws IOException {
+                                             HCatTableInfo hcatTableInfo) throws IOException {
 
         HCatSchema hcatTableSchema = hcatTableInfo.getDataColumns();
         Map<String, String> hcatHbaseColMap = getHCatHBaseColumnMapping(hcatTableInfo);
         HashMap<String, Long> revisionMap = new HashMap<String, Long>();
 
         for (HCatFieldSchema fSchema : hcatTableSchema.getFields()) {
-            if(hcatHbaseColMap.containsKey(fSchema.getName())){
+            if (hcatHbaseColMap.containsKey(fSchema.getName())) {
                 String colFamily = hcatHbaseColMap.get(fSchema.getName());
                 long revisionID = hbaseSnapshot.getRevision(colFamily);
                 revisionMap.put(fSchema.getName(), revisionID);
@@ -153,12 +153,12 @@
         }
 
         HCatTableSnapshot hcatSnapshot = new HCatTableSnapshot(
-                 hcatTableInfo.getDatabaseName(), hcatTableInfo.getTableName(),revisionMap,hbaseSnapshot.getLatestRevision());
+            hcatTableInfo.getDatabaseName(), hcatTableInfo.getTableName(), revisionMap, hbaseSnapshot.getLatestRevision());
         return hcatSnapshot;
     }
 
     static TableSnapshot convertSnapshot(HCatTableSnapshot hcatSnapshot,
-            HCatTableInfo hcatTableInfo) throws IOException {
+                                         HCatTableInfo hcatTableInfo) throws IOException {
 
         HCatSchema hcatTableSchema = hcatTableInfo.getDataColumns();
         Map<String, Long> revisionMap = new HashMap<String, Long>();
@@ -172,8 +172,8 @@
         }
 
         String fullyQualifiedName = hcatSnapshot.getDatabaseName() + "."
-                + hcatSnapshot.getTableName();
-        return new TableSnapshot(fullyQualifiedName, revisionMap,hcatSnapshot.getLatestRevision());
+            + hcatSnapshot.getTableName();
+        return new TableSnapshot(fullyQualifiedName, revisionMap, hcatSnapshot.getLatestRevision());
 
     }
 
@@ -186,13 +186,13 @@
      * @throws IOException
      */
     static Transaction beginWriteTransaction(String qualifiedTableName,
-            HCatTableInfo tableInfo, Configuration jobConf) throws IOException {
+                                             HCatTableInfo tableInfo, Configuration jobConf) throws IOException {
         Transaction txn;
         RevisionManager rm = null;
         try {
             rm = HBaseRevisionManagerUtil.getOpenedRevisionManager(jobConf);
             String hBaseColumns = tableInfo.getStorerInfo().getProperties()
-                    .getProperty(HBaseSerDe.HBASE_COLUMNS_MAPPING);
+                .getProperty(HBaseSerDe.HBASE_COLUMNS_MAPPING);
             String[] splits = hBaseColumns.split("[,:]");
             Set<String> families = new HashSet<String>();
             for (int i = 0; i < splits.length; i += 2) {
@@ -207,13 +207,13 @@
     }
 
     static Transaction getWriteTransaction(Configuration conf) throws IOException {
-        OutputJobInfo outputJobInfo = (OutputJobInfo)HCatUtil.deserialize(conf.get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
+        OutputJobInfo outputJobInfo = (OutputJobInfo) HCatUtil.deserialize(conf.get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
         return (Transaction) HCatUtil.deserialize(outputJobInfo.getProperties()
-                                                               .getProperty(HBaseConstants.PROPERTY_WRITE_TXN_KEY));
+            .getProperty(HBaseConstants.PROPERTY_WRITE_TXN_KEY));
     }
 
     static void setWriteTransaction(Configuration conf, Transaction txn) throws IOException {
-        OutputJobInfo outputJobInfo = (OutputJobInfo)HCatUtil.deserialize(conf.get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
+        OutputJobInfo outputJobInfo = (OutputJobInfo) HCatUtil.deserialize(conf.get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
         outputJobInfo.getProperties().setProperty(HBaseConstants.PROPERTY_WRITE_TXN_KEY, HCatUtil.serialize(txn));
         conf.set(HCatConstants.HCAT_KEY_OUTPUT_INFO, HCatUtil.serialize(outputJobInfo));
     }
@@ -228,19 +228,19 @@
         return getWriteTransaction(conf).getRevisionNumber();
     }
 
-    private static Map<String, String> getHCatHBaseColumnMapping( HCatTableInfo hcatTableInfo)
-            throws IOException {
+    private static Map<String, String> getHCatHBaseColumnMapping(HCatTableInfo hcatTableInfo)
+        throws IOException {
 
         HCatSchema hcatTableSchema = hcatTableInfo.getDataColumns();
         StorerInfo storeInfo = hcatTableInfo.getStorerInfo();
         String hbaseColumnMapping = storeInfo.getProperties().getProperty(
-                HBaseSerDe.HBASE_COLUMNS_MAPPING);
+            HBaseSerDe.HBASE_COLUMNS_MAPPING);
 
         Map<String, String> hcatHbaseColMap = new HashMap<String, String>();
         List<String> columnFamilies = new ArrayList<String>();
         List<String> columnQualifiers = new ArrayList<String>();
         HBaseUtil.parseColumnMapping(hbaseColumnMapping, columnFamilies,
-                null, columnQualifiers, null);
+            null, columnQualifiers, null);
 
         for (HCatFieldSchema column : hcatTableSchema.getFields()) {
             int fieldPos = hcatTableSchema.getPosition(column.getName());
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseUtil.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseUtil.java
index 36395b7..07285aa 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseUtil.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseUtil.java
@@ -28,7 +28,7 @@
 
 class HBaseUtil {
 
-    private HBaseUtil(){
+    private HBaseUtil() {
     }
 
     /**
@@ -47,97 +47,97 @@
     static int parseColumnMapping(
         String columnMapping,
         List<String> colFamilies,
-        List<byte []> colFamiliesBytes,
+        List<byte[]> colFamiliesBytes,
         List<String> colQualifiers,
-        List<byte []> colQualifiersBytes) throws IOException {
+        List<byte[]> colQualifiersBytes) throws IOException {
 
-      int rowKeyIndex = -1;
+        int rowKeyIndex = -1;
 
-      if (colFamilies == null || colQualifiers == null) {
-        throw new IllegalArgumentException("Error: caller must pass in lists for the column families " +
-            "and qualifiers.");
-      }
-
-      colFamilies.clear();
-      colQualifiers.clear();
-
-      if (columnMapping == null) {
-        throw new IllegalArgumentException("Error: hbase.columns.mapping missing for this HBase table.");
-      }
-
-      if (columnMapping.equals("") || columnMapping.equals(HBaseSerDe.HBASE_KEY_COL)) {
-        throw new IllegalArgumentException("Error: hbase.columns.mapping specifies only the HBase table"
-            + " row key. A valid Hive-HBase table must specify at least one additional column.");
-      }
-
-      String [] mapping = columnMapping.split(",");
-
-      for (int i = 0; i < mapping.length; i++) {
-        String elem = mapping[i];
-        int idxFirst = elem.indexOf(":");
-        int idxLast = elem.lastIndexOf(":");
-
-        if (idxFirst < 0 || !(idxFirst == idxLast)) {
-          throw new IllegalArgumentException("Error: the HBase columns mapping contains a badly formed " +
-              "column family, column qualifier specification.");
+        if (colFamilies == null || colQualifiers == null) {
+            throw new IllegalArgumentException("Error: caller must pass in lists for the column families " +
+                "and qualifiers.");
         }
 
-        if (elem.equals(HBaseSerDe.HBASE_KEY_COL)) {
-          rowKeyIndex = i;
-          colFamilies.add(elem);
-          colQualifiers.add(null);
-        } else {
-          String [] parts = elem.split(":");
-          assert(parts.length > 0 && parts.length <= 2);
-          colFamilies.add(parts[0]);
+        colFamilies.clear();
+        colQualifiers.clear();
 
-          if (parts.length == 2) {
-            colQualifiers.add(parts[1]);
-          } else {
-            colQualifiers.add(null);
-          }
+        if (columnMapping == null) {
+            throw new IllegalArgumentException("Error: hbase.columns.mapping missing for this HBase table.");
         }
-      }
 
-      if (rowKeyIndex == -1) {
-        colFamilies.add(0, HBaseSerDe.HBASE_KEY_COL);
-        colQualifiers.add(0, null);
-        rowKeyIndex = 0;
-      }
-
-      if (colFamilies.size() != colQualifiers.size()) {
-        throw new IOException("Error in parsing the hbase columns mapping.");
-      }
-
-      // populate the corresponding byte [] if the client has passed in a non-null list
-      if (colFamiliesBytes != null) {
-        colFamiliesBytes.clear();
-
-        for (String fam : colFamilies) {
-          colFamiliesBytes.add(Bytes.toBytes(fam));
+        if (columnMapping.equals("") || columnMapping.equals(HBaseSerDe.HBASE_KEY_COL)) {
+            throw new IllegalArgumentException("Error: hbase.columns.mapping specifies only the HBase table"
+                + " row key. A valid Hive-HBase table must specify at least one additional column.");
         }
-      }
 
-      if (colQualifiersBytes != null) {
-        colQualifiersBytes.clear();
+        String[] mapping = columnMapping.split(",");
 
-        for (String qual : colQualifiers) {
-          if (qual == null) {
-            colQualifiersBytes.add(null);
-          } else {
-            colQualifiersBytes.add(Bytes.toBytes(qual));
-          }
+        for (int i = 0; i < mapping.length; i++) {
+            String elem = mapping[i];
+            int idxFirst = elem.indexOf(":");
+            int idxLast = elem.lastIndexOf(":");
+
+            if (idxFirst < 0 || !(idxFirst == idxLast)) {
+                throw new IllegalArgumentException("Error: the HBase columns mapping contains a badly formed " +
+                    "column family, column qualifier specification.");
+            }
+
+            if (elem.equals(HBaseSerDe.HBASE_KEY_COL)) {
+                rowKeyIndex = i;
+                colFamilies.add(elem);
+                colQualifiers.add(null);
+            } else {
+                String[] parts = elem.split(":");
+                assert (parts.length > 0 && parts.length <= 2);
+                colFamilies.add(parts[0]);
+
+                if (parts.length == 2) {
+                    colQualifiers.add(parts[1]);
+                } else {
+                    colQualifiers.add(null);
+                }
+            }
         }
-      }
 
-      if (colFamiliesBytes != null && colQualifiersBytes != null) {
-        if (colFamiliesBytes.size() != colQualifiersBytes.size()) {
-          throw new IOException("Error in caching the bytes for the hbase column families " +
-              "and qualifiers.");
+        if (rowKeyIndex == -1) {
+            colFamilies.add(0, HBaseSerDe.HBASE_KEY_COL);
+            colQualifiers.add(0, null);
+            rowKeyIndex = 0;
         }
-      }
 
-      return rowKeyIndex;
+        if (colFamilies.size() != colQualifiers.size()) {
+            throw new IOException("Error in parsing the hbase columns mapping.");
+        }
+
+        // populate the corresponding byte [] if the client has passed in a non-null list
+        if (colFamiliesBytes != null) {
+            colFamiliesBytes.clear();
+
+            for (String fam : colFamilies) {
+                colFamiliesBytes.add(Bytes.toBytes(fam));
+            }
+        }
+
+        if (colQualifiersBytes != null) {
+            colQualifiersBytes.clear();
+
+            for (String qual : colQualifiers) {
+                if (qual == null) {
+                    colQualifiersBytes.add(null);
+                } else {
+                    colQualifiersBytes.add(Bytes.toBytes(qual));
+                }
+            }
+        }
+
+        if (colFamiliesBytes != null && colQualifiersBytes != null) {
+            if (colFamiliesBytes.size() != colQualifiersBytes.size()) {
+                throw new IOException("Error in caching the bytes for the hbase column families " +
+                    "and qualifiers.");
+            }
+        }
+
+        return rowKeyIndex;
     }
 
     /**
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HCatTableSnapshot.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HCatTableSnapshot.java
index 23daf18..42770e1 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HCatTableSnapshot.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HCatTableSnapshot.java
@@ -27,7 +27,7 @@
  * record readers to obtain knowledge about the revisions of a
  * column to be filtered.
  */
-public class HCatTableSnapshot implements Serializable{
+public class HCatTableSnapshot implements Serializable {
 
     private static final long serialVersionUID = 1L;
     private String tableName;
@@ -52,15 +52,15 @@
     /**
      * @return The name of the database to which the table snapshot belongs.
      */
-    public String getDatabaseName(){
+    public String getDatabaseName() {
         return this.databaseName;
     }
 
     /**
      * @return The revision number of a column in a snapshot.
      */
-    long getRevision(String column){
-        if(columnMap.containsKey(column))
+    long getRevision(String column) {
+        if (columnMap.containsKey(column))
             return this.columnMap.get(column);
         return latestRevision;
     }
@@ -71,7 +71,7 @@
      * @param column The data column of the table
      * @return true, if successful
      */
-    boolean containsColumn(String column){
+    boolean containsColumn(String column) {
         return this.columnMap.containsKey(column);
     }
 
@@ -84,8 +84,8 @@
 
     @Override
     public String toString() {
-        String snapshot = " Database Name: " + this.databaseName +" Table Name : " + tableName +
-                 "Latest Revision: "+latestRevision+" Column revision : " + columnMap.toString();
+        String snapshot = " Database Name: " + this.databaseName + " Table Name : " + tableName +
+            "Latest Revision: " + latestRevision + " Column revision : " + columnMap.toString();
         return snapshot;
     }
 }
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HbaseSnapshotRecordReader.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HbaseSnapshotRecordReader.java
index 2288795..cb813d1 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HbaseSnapshotRecordReader.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HbaseSnapshotRecordReader.java
@@ -56,8 +56,8 @@
     private final Configuration conf;
     private final int maxRevisions = 1;
     private ResultScanner scanner;
-    private Scan  scan;
-    private HTable  htable;
+    private Scan scan;
+    private HTable htable;
     private TableSnapshot snapshot;
     private Iterator<Result> resultItr;
     private Set<Long> allAbortedTransactions;
@@ -69,9 +69,9 @@
         this.conf = conf;
         String snapshotString = conf.get(HBaseConstants.PROPERTY_TABLE_SNAPSHOT_KEY);
         HCatTableSnapshot hcatSnapshot = (HCatTableSnapshot) HCatUtil
-                .deserialize(snapshotString);
+            .deserialize(snapshotString);
         this.snapshot = HBaseRevisionManagerUtil.convertSnapshot(hcatSnapshot,
-                inpJobInfo.getTableInfo());
+            inpJobInfo.getTableInfo());
     }
 
     public void init() throws IOException {
@@ -104,7 +104,7 @@
             for (byte[] familyKey : families) {
                 String family = Bytes.toString(familyKey);
                 List<FamilyRevision> abortedWriteTransactions = rm.getAbortedWriteTransactions(
-                        tableName, family);
+                    tableName, family);
                 if (abortedWriteTransactions != null) {
                     for (FamilyRevision revision : abortedWriteTransactions) {
                         abortedTransactions.add(revision.getRevision());
@@ -172,7 +172,7 @@
     public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
         if (this.resultItr == null) {
             LOG.warn("The HBase result iterator is found null. It is possible"
-                    + " that the record reader has already been closed.");
+                + " that the record reader has already been closed.");
         } else {
             while (resultItr.hasNext()) {
                 Result temp = resultItr.next();
@@ -233,7 +233,7 @@
             }
         }
 
-        if(finalKeyVals.size() == 0){
+        if (finalKeyVals.size() == 0) {
             return null;
         } else {
             KeyValue[] kvArray = new KeyValue[finalKeyVals.size()];
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/ImportSequenceFile.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/ImportSequenceFile.java
index 9c415b2..205cae4 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/ImportSequenceFile.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/ImportSequenceFile.java
@@ -64,12 +64,12 @@
     private final static String IMPORTER_WORK_DIR = "_IMPORTER_MR_WORK_DIR";
 
 
-    private static class SequenceFileImporter  extends Mapper<ImmutableBytesWritable, Put, ImmutableBytesWritable, Put> {
+    private static class SequenceFileImporter extends Mapper<ImmutableBytesWritable, Put, ImmutableBytesWritable, Put> {
 
         @Override
         public void map(ImmutableBytesWritable rowKey, Put value,
                         Context context)
-                throws IOException {
+            throws IOException {
             try {
                 context.write(new ImmutableBytesWritable(value.getRow()), value);
             } catch (InterruptedException e) {
@@ -112,7 +112,7 @@
                 @Override
                 public void abortJob(JobContext jobContext, JobStatus.State state) throws IOException {
                     try {
-                        baseOutputCommitter.abortJob(jobContext,state);
+                        baseOutputCommitter.abortJob(jobContext, state);
                     } finally {
                         cleanupScratch(jobContext);
                     }
@@ -124,13 +124,13 @@
                         baseOutputCommitter.commitJob(jobContext);
                         Configuration conf = jobContext.getConfiguration();
                         try {
-                        //import hfiles
-                        new LoadIncrementalHFiles(conf)
+                            //import hfiles
+                            new LoadIncrementalHFiles(conf)
                                 .doBulkLoad(HFileOutputFormat.getOutputPath(jobContext),
-                                                   new HTable(conf,
-                                                                      conf.get(HBaseConstants.PROPERTY_OUTPUT_TABLE_NAME_KEY)));
+                                    new HTable(conf,
+                                        conf.get(HBaseConstants.PROPERTY_OUTPUT_TABLE_NAME_KEY)));
                         } catch (Exception e) {
-                        	throw new IOException("BulkLoad failed.", e);
+                            throw new IOException("BulkLoad failed.", e);
                         }
                     } finally {
                         cleanupScratch(jobContext);
@@ -146,16 +146,16 @@
                     }
                 }
 
-                private void cleanupScratch(JobContext context) throws IOException{
+                private void cleanupScratch(JobContext context) throws IOException {
                     FileSystem fs = FileSystem.get(context.getConfiguration());
-                    fs.delete(HFileOutputFormat.getOutputPath(context),true);
+                    fs.delete(HFileOutputFormat.getOutputPath(context), true);
                 }
             };
         }
     }
 
     private static Job createSubmittableJob(Configuration conf, String tableName, Path inputDir, Path scratchDir, boolean localMode)
-            throws IOException {
+        throws IOException {
         Job job = new Job(conf, NAME + "_" + tableName);
         job.setJarByClass(SequenceFileImporter.class);
         FileInputFormat.setInputPaths(job, inputDir);
@@ -172,16 +172,16 @@
         job.setOutputFormatClass(ImporterOutputFormat.class);
 
         //local mode doesn't support symbolic links so we have to manually set the actual path
-        if(localMode) {
+        if (localMode) {
             String partitionFile = null;
-            for(URI uri: DistributedCache.getCacheFiles(job.getConfiguration())) {
-                if(DEFAULT_PATH.equals(uri.getFragment())) {
+            for (URI uri : DistributedCache.getCacheFiles(job.getConfiguration())) {
+                if (DEFAULT_PATH.equals(uri.getFragment())) {
                     partitionFile = uri.toString();
                     break;
                 }
             }
-            partitionFile = partitionFile.substring(0,partitionFile.lastIndexOf("#"));
-            job.getConfiguration().set(TotalOrderPartitioner.PARTITIONER_PATH,partitionFile.toString());
+            partitionFile = partitionFile.substring(0, partitionFile.lastIndexOf("#"));
+            job.getConfiguration().set(TotalOrderPartitioner.PARTITIONER_PATH, partitionFile.toString());
         }
 
         return job;
@@ -190,7 +190,7 @@
     /**
      * Method to run the Importer MapReduce Job. Normally will be called by another MR job
      * during OutputCommitter.commitJob().
-      * @param parentContext JobContext of the parent job
+     * @param parentContext JobContext of the parent job
      * @param tableName name of table to bulk load data into
      * @param InputDir path of SequenceFile formatted data to read
      * @param scratchDir temporary path for the Importer MR job to build the HFiles which will be imported
@@ -199,21 +199,21 @@
     static boolean runJob(JobContext parentContext, String tableName, Path InputDir, Path scratchDir) {
         Configuration parentConf = parentContext.getConfiguration();
         Configuration conf = new Configuration();
-        for(Map.Entry<String,String> el: parentConf) {
-            if(el.getKey().startsWith("hbase."))
-                conf.set(el.getKey(),el.getValue());
-            if(el.getKey().startsWith("mapred.cache.archives"))
-                conf.set(el.getKey(),el.getValue());
+        for (Map.Entry<String, String> el : parentConf) {
+            if (el.getKey().startsWith("hbase."))
+                conf.set(el.getKey(), el.getValue());
+            if (el.getKey().startsWith("mapred.cache.archives"))
+                conf.set(el.getKey(), el.getValue());
         }
 
         //Inherit jar dependencies added to distributed cache loaded by parent job
-        conf.set("mapred.job.classpath.archives",parentConf.get("mapred.job.classpath.archives", ""));
-        conf.set("mapreduce.job.cache.archives.visibilities",parentConf.get("mapreduce.job.cache.archives.visibilities",""));
+        conf.set("mapred.job.classpath.archives", parentConf.get("mapred.job.classpath.archives", ""));
+        conf.set("mapreduce.job.cache.archives.visibilities", parentConf.get("mapreduce.job.cache.archives.visibilities", ""));
 
         //Temporary fix until hbase security is ready
         //We need the written HFile to be world readable so
         //hbase regionserver user has the privileges to perform a hdfs move
-        if(parentConf.getBoolean("hadoop.security.authorization", false)) {
+        if (parentConf.getBoolean("hadoop.security.authorization", false)) {
             FsPermission.setUMask(conf, FsPermission.valueOf("----------"));
         }
 
@@ -225,25 +225,24 @@
         boolean success = false;
         try {
             FileSystem fs = FileSystem.get(parentConf);
-            Path workDir = new Path(new Job(parentConf).getWorkingDirectory(),IMPORTER_WORK_DIR);
-            if(!fs.mkdirs(workDir))
-                throw new IOException("Importer work directory already exists: "+workDir);
+            Path workDir = new Path(new Job(parentConf).getWorkingDirectory(), IMPORTER_WORK_DIR);
+            if (!fs.mkdirs(workDir))
+                throw new IOException("Importer work directory already exists: " + workDir);
             Job job = createSubmittableJob(conf, tableName, InputDir, scratchDir, localMode);
             job.setWorkingDirectory(workDir);
             job.getCredentials().addAll(parentContext.getCredentials());
             success = job.waitForCompletion(true);
             fs.delete(workDir, true);
             //We only cleanup on success because failure might've been caused by existence of target directory
-            if(localMode && success)
-            {
-                new ImporterOutputFormat().getOutputCommitter(org.apache.hadoop.mapred.HCatMapRedUtil.createTaskAttemptContext(conf,new TaskAttemptID())).commitJob(job);
+            if (localMode && success) {
+                new ImporterOutputFormat().getOutputCommitter(org.apache.hadoop.mapred.HCatMapRedUtil.createTaskAttemptContext(conf, new TaskAttemptID())).commitJob(job);
             }
         } catch (InterruptedException e) {
             LOG.error("ImportSequenceFile Failed", e);
         } catch (ClassNotFoundException e) {
-            LOG.error("ImportSequenceFile Failed",e);
+            LOG.error("ImportSequenceFile Failed", e);
         } catch (IOException e) {
-            LOG.error("ImportSequenceFile Failed",e);
+            LOG.error("ImportSequenceFile Failed", e);
         }
         return success;
     }
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/IDGenerator.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/IDGenerator.java
index ba6dd6b..29de3e7 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/IDGenerator.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/IDGenerator.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hcatalog.hbase.snapshot;
+
 import java.io.IOException;
 import java.nio.charset.Charset;
 
@@ -32,7 +33,7 @@
 /**
  * This class generates revision id's for transactions.
  */
-class IDGenerator implements LockListener{
+class IDGenerator implements LockListener {
 
     private ZooKeeper zookeeper;
     private String zNodeDataLoc;
@@ -41,7 +42,7 @@
     private static final Logger LOG = LoggerFactory.getLogger(IDGenerator.class);
 
     IDGenerator(ZooKeeper zookeeper, String tableName, String idGenNode)
-            throws IOException {
+        throws IOException {
         this.zookeeper = zookeeper;
         this.zNodeDataLoc = idGenNode;
         this.zNodeLockBasePath = PathUtil.getLockManagementNode(idGenNode);
@@ -53,7 +54,7 @@
      * @return revision ID
      * @throws IOException
      */
-    public long obtainID() throws IOException{
+    public long obtainID() throws IOException {
         WriteLock wLock = new WriteLock(zookeeper, zNodeLockBasePath, Ids.OPEN_ACL_UNSAFE);
         wLock.setLockListener(this);
         try {
@@ -62,7 +63,7 @@
                 //TO DO : Let this request queue up and try obtaining lock.
                 throw new IOException("Unable to obtain lock to obtain id.");
             } else {
-                    id = incrementAndReadCounter();
+                id = incrementAndReadCounter();
             }
         } catch (KeeperException e) {
             LOG.warn("Exception while obtaining lock for ID.", e);
@@ -82,34 +83,34 @@
      * @return revision ID
      * @throws IOException
      */
-    public long readID() throws IOException{
+    public long readID() throws IOException {
         long curId;
         try {
             Stat stat = new Stat();
             byte[] data = zookeeper.getData(this.zNodeDataLoc, false, stat);
-            curId = Long.parseLong(new String(data,Charset.forName("UTF-8")));
+            curId = Long.parseLong(new String(data, Charset.forName("UTF-8")));
         } catch (KeeperException e) {
             LOG.warn("Exception while reading current revision id.", e);
             throw new IOException("Exception while reading current revision id.", e);
         } catch (InterruptedException e) {
             LOG.warn("Exception while reading current revision id.", e);
-            throw new IOException("Exception while reading current revision id.",e);
+            throw new IOException("Exception while reading current revision id.", e);
         }
 
         return curId;
     }
 
 
-    private long incrementAndReadCounter() throws IOException{
+    private long incrementAndReadCounter() throws IOException {
 
         long curId, usedId;
         try {
             Stat stat = new Stat();
             byte[] data = zookeeper.getData(this.zNodeDataLoc, false, stat);
-            usedId = Long.parseLong((new String(data,Charset.forName("UTF-8"))));
-            curId = usedId +1;
+            usedId = Long.parseLong((new String(data, Charset.forName("UTF-8"))));
+            curId = usedId + 1;
             String lastUsedID = String.valueOf(curId);
-            zookeeper.setData(this.zNodeDataLoc, lastUsedID.getBytes(Charset.forName("UTF-8")), -1 );
+            zookeeper.setData(this.zNodeDataLoc, lastUsedID.getBytes(Charset.forName("UTF-8")), -1);
 
         } catch (KeeperException e) {
             LOG.warn("Exception while incrementing revision id.", e);
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/PathUtil.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/PathUtil.java
index 66f79a7..c2a9a4b 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/PathUtil.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/PathUtil.java
@@ -33,10 +33,10 @@
  * baseDir/TrasactionBasePath/TableB/columnFamily-1/abortedTxns
 
  */
-public class PathUtil{
+public class PathUtil {
 
-    static final String      DATA_DIR = "/data";
-    static final String      CLOCK_NODE   = "/clock";
+    static final String DATA_DIR = "/data";
+    static final String CLOCK_NODE = "/clock";
 
     /**
      * This method returns the data path associated with the currently
@@ -47,10 +47,10 @@
      * @return The path of the running transactions data.
      */
     static String getRunningTxnInfoPath(String baseDir, String tableName,
-            String columnFamily) {
+                                        String columnFamily) {
         String txnBasePath = getTransactionBasePath(baseDir);
         String path = txnBasePath + "/" + tableName + "/" + columnFamily
-                + "/runningTxns";
+            + "/runningTxns";
         return path;
     }
 
@@ -63,10 +63,10 @@
      * @return The path of the aborted transactions data.
      */
     static String getAbortInformationPath(String baseDir, String tableName,
-            String columnFamily) {
+                                          String columnFamily) {
         String txnBasePath = getTransactionBasePath(baseDir);
         String path = txnBasePath + "/" + tableName + "/" + columnFamily
-                + "/abortData";
+            + "/abortData";
         return path;
     }
 
@@ -83,13 +83,13 @@
         return revisionIDNode;
     }
 
-   /**
-    * Gets the lock management node for any znode that needs to be locked.
-    *
-    * @param path the path of the znode.
-    * @return the lock management node path.
-    */
-   static String getLockManagementNode(String path) {
+    /**
+     * Gets the lock management node for any znode that needs to be locked.
+     *
+     * @param path the path of the znode.
+     * @return the lock management node path.
+     */
+    static String getLockManagementNode(String path) {
         String lockNode = path + "_locknode_";
         return lockNode;
     }
@@ -112,7 +112,7 @@
      * @param tableName the table name
      * @return the txn data path for the table.
      */
-    static String getTxnDataPath(String baseDir, String tableName){
+    static String getTxnDataPath(String baseDir, String tableName) {
         String txnBasePath = getTransactionBasePath(baseDir);
         String path = txnBasePath + "/" + tableName;
         return path;
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RMConstants.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RMConstants.java
index 303ded4..5981761 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RMConstants.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RMConstants.java
@@ -19,11 +19,11 @@
 package org.apache.hcatalog.hbase.snapshot;
 
 public class RMConstants {
-  public static final String REVISION_MGR_ENDPOINT_IMPL_CLASS = "revision.manager.endpoint.impl.class";
+    public static final String REVISION_MGR_ENDPOINT_IMPL_CLASS = "revision.manager.endpoint.impl.class";
 
-  public static final String WRITE_TRANSACTION_TIMEOUT = "revision.manager.writeTxn.timeout";
+    public static final String WRITE_TRANSACTION_TIMEOUT = "revision.manager.writeTxn.timeout";
 
-  public static final String ZOOKEEPER_HOSTLIST = "revision.manager.zk.hostList";
+    public static final String ZOOKEEPER_HOSTLIST = "revision.manager.zk.hostList";
 
-  public static final String ZOOKEEPER_DATADIR = "revision.manager.zk.dataDir";
+    public static final String ZOOKEEPER_DATADIR = "revision.manager.zk.dataDir";
 }
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManager.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManager.java
index c57c62a..511926c 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManager.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManager.java
@@ -73,7 +73,7 @@
      * @throws IOException
      */
     public Transaction beginWriteTransaction(String table, List<String> families)
-            throws IOException;
+        throws IOException;
 
     /**
      * Start the write transaction.
@@ -85,7 +85,7 @@
      * @throws IOException
      */
     public Transaction beginWriteTransaction(String table,
-            List<String> families, long keepAlive) throws IOException;
+                                             List<String> families, long keepAlive) throws IOException;
 
     /**
      * Commit the write transaction.
@@ -94,7 +94,7 @@
      * @throws IOException
      */
     public void commitWriteTransaction(Transaction transaction)
-            throws IOException;
+        throws IOException;
 
     /**
      * Abort the write transaction.
@@ -103,7 +103,7 @@
      * @throws IOException
      */
     public void abortWriteTransaction(Transaction transaction)
-            throws IOException;
+        throws IOException;
 
     /**
      * Get the list of aborted Transactions for a column family
@@ -114,7 +114,7 @@
      * @throws java.io.IOException
      */
     public List<FamilyRevision> getAbortedWriteTransactions(String table,
-        String columnFamily) throws IOException;
+                                                            String columnFamily) throws IOException;
 
     /**
      * Create the latest snapshot of the table.
@@ -134,7 +134,7 @@
      * @throws IOException
      */
     public TableSnapshot createSnapshot(String tableName, long revision)
-            throws IOException;
+        throws IOException;
 
     /**
      * Extends the expiration of a transaction by the time indicated by keep alive.
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerConfiguration.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerConfiguration.java
index f181666..1d442fc 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerConfiguration.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerConfiguration.java
@@ -24,36 +24,35 @@
 public class RevisionManagerConfiguration {
 
 
+    public static Configuration addResources(Configuration conf) {
+        conf.addDefaultResource("revision-manager-default.xml");
+        conf.addResource("revision-manager-site.xml");
+        return conf;
+    }
 
-  public static Configuration addResources(Configuration conf) {
-    conf.addDefaultResource("revision-manager-default.xml");
-    conf.addResource("revision-manager-site.xml");
-    return conf;
-  }
+    /**
+     * Creates a Configuration with Revision Manager resources
+     * @return a Configuration with Revision Manager resources
+     */
+    public static Configuration create() {
+        Configuration conf = new Configuration();
+        return addResources(conf);
+    }
 
-  /**
-   * Creates a Configuration with Revision Manager resources
-   * @return a Configuration with Revision Manager resources
-   */
-  public static Configuration create() {
-    Configuration conf = new Configuration();
-    return addResources(conf);
-  }
-
-  /**
-   * Creates a clone of passed configuration.
-   * @param that Configuration to clone.
-   * @return a Configuration created with the revision-manager-*.xml files plus
-   * the given configuration.
-   */
-  public static Configuration create(final Configuration that) {
-    Configuration conf = create();
-    //we need to merge things instead of doing new Configuration(that)
-    //because of a bug in Configuration wherein the config
-    //set on the MR fronted will get loaded on the backend as resouce called job.xml
-    //hence adding resources on the backed could potentially overwrite properties
-    //set on the frontend which we shouldn't be doing here
-    HBaseConfiguration.merge(conf, that);
-    return conf;
-  }
+    /**
+     * Creates a clone of the passed configuration.
+     * @param that Configuration to clone.
+     * @return a Configuration created with the revision-manager-*.xml files plus
+     * the given configuration.
+     */
+    public static Configuration create(final Configuration that) {
+        Configuration conf = create();
+        //we need to merge things instead of doing new Configuration(that)
+        //because of a bug in Configuration wherein the config
+        //set on the MR frontend will get loaded on the backend as a resource called job.xml
+        //hence adding resources on the backend could potentially overwrite properties
+        //set on the frontend which we shouldn't be doing here
+        HBaseConfiguration.merge(conf, that);
+        return conf;
+    }
 }
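
For orientation, a minimal sketch (not part of this patch) of how the reformatted RevisionManagerConfiguration is typically layered on top of a caller's settings; the jobConf variable and the printed key are placeholders chosen for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hcatalog.hbase.snapshot.RevisionManagerConfiguration;

public class RevisionManagerConfigurationSketch {
    public static void main(String[] args) {
        // Stand-in for the configuration handed over by the surrounding MR job.
        Configuration jobConf = new Configuration();

        // create() loads revision-manager-default.xml and revision-manager-site.xml;
        // create(jobConf) additionally merges the caller's settings on top of those resources.
        Configuration merged = RevisionManagerConfiguration.create(jobConf);

        // "revision.manager.zk.dataDir" (RMConstants.ZOOKEEPER_DATADIR) is one of the
        // keys the revision manager later reads from the merged configuration.
        System.out.println(merged.get("revision.manager.zk.dataDir"));
    }
}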
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerEndpoint.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerEndpoint.java
index 8f56b21..5187288 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerEndpoint.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerEndpoint.java
@@ -35,106 +35,106 @@
  */
 public class RevisionManagerEndpoint extends BaseEndpointCoprocessor implements RevisionManagerProtocol {
 
-  private static final Logger LOGGER =
-		      LoggerFactory.getLogger(RevisionManagerEndpoint.class.getName());
-  
-  private RevisionManager rmImpl = null;
+    private static final Logger LOGGER =
+        LoggerFactory.getLogger(RevisionManagerEndpoint.class.getName());
 
-  @Override
-  public void start(CoprocessorEnvironment env) {
-    super.start(env);
-    try {
-      Configuration conf = RevisionManagerConfiguration.create(env.getConfiguration());
-      String className = conf.get(RMConstants.REVISION_MGR_ENDPOINT_IMPL_CLASS,
-          ZKBasedRevisionManager.class.getName());
-      LOGGER.debug("Using Revision Manager implementation: {}",className);
-      rmImpl = RevisionManagerFactory.getOpenedRevisionManager(className, conf);
-    } catch (IOException e) {
-      LOGGER.error("Failed to initialize revision manager", e);
+    private RevisionManager rmImpl = null;
+
+    @Override
+    public void start(CoprocessorEnvironment env) {
+        super.start(env);
+        try {
+            Configuration conf = RevisionManagerConfiguration.create(env.getConfiguration());
+            String className = conf.get(RMConstants.REVISION_MGR_ENDPOINT_IMPL_CLASS,
+                ZKBasedRevisionManager.class.getName());
+            LOGGER.debug("Using Revision Manager implementation: {}", className);
+            rmImpl = RevisionManagerFactory.getOpenedRevisionManager(className, conf);
+        } catch (IOException e) {
+            LOGGER.error("Failed to initialize revision manager", e);
+        }
     }
-  }
 
-  @Override
-  public void stop(CoprocessorEnvironment env) {
-    if (rmImpl != null) {
-      try {
-        rmImpl.close();
-      } catch (IOException e) {
-        LOGGER.warn("Error closing revision manager.", e);
-      }
+    @Override
+    public void stop(CoprocessorEnvironment env) {
+        if (rmImpl != null) {
+            try {
+                rmImpl.close();
+            } catch (IOException e) {
+                LOGGER.warn("Error closing revision manager.", e);
+            }
+        }
+        super.stop(env);
     }
-    super.stop(env);
-  }
 
-  @Override
-  public void initialize(Configuration conf) {
-    // do nothing, HBase controls life cycle
-  }
+    @Override
+    public void initialize(Configuration conf) {
+        // do nothing, HBase controls life cycle
+    }
 
-  @Override
-  public void open() throws IOException {
-    // do nothing, HBase controls life cycle
-  }
+    @Override
+    public void open() throws IOException {
+        // do nothing, HBase controls life cycle
+    }
 
-  @Override
-  public void close() throws IOException {
-    // do nothing, HBase controls life cycle
-  }
+    @Override
+    public void close() throws IOException {
+        // do nothing, HBase controls life cycle
+    }
 
-  @Override
-  public void createTable(String table, List<String> columnFamilies) throws IOException {
-    rmImpl.createTable(table, columnFamilies);
-  }
+    @Override
+    public void createTable(String table, List<String> columnFamilies) throws IOException {
+        rmImpl.createTable(table, columnFamilies);
+    }
 
-  @Override
-  public void dropTable(String table) throws IOException {
-    rmImpl.dropTable(table);
-  }
+    @Override
+    public void dropTable(String table) throws IOException {
+        rmImpl.dropTable(table);
+    }
 
-  @Override
-  public Transaction beginWriteTransaction(String table, List<String> families)
-      throws IOException {
-    return rmImpl.beginWriteTransaction(table, families);
-  }
+    @Override
+    public Transaction beginWriteTransaction(String table, List<String> families)
+        throws IOException {
+        return rmImpl.beginWriteTransaction(table, families);
+    }
 
-  @Override
-  public Transaction beginWriteTransaction(String table,
-      List<String> families, long keepAlive) throws IOException {
-    return rmImpl.beginWriteTransaction(table, families, keepAlive);
-  }
+    @Override
+    public Transaction beginWriteTransaction(String table,
+                                             List<String> families, long keepAlive) throws IOException {
+        return rmImpl.beginWriteTransaction(table, families, keepAlive);
+    }
 
-  @Override
-  public void commitWriteTransaction(Transaction transaction)
-      throws IOException {
-    rmImpl.commitWriteTransaction(transaction);
-  }
+    @Override
+    public void commitWriteTransaction(Transaction transaction)
+        throws IOException {
+        rmImpl.commitWriteTransaction(transaction);
+    }
 
-  @Override
-  public void abortWriteTransaction(Transaction transaction)
-      throws IOException {
-    rmImpl.abortWriteTransaction(transaction);
-  }
+    @Override
+    public void abortWriteTransaction(Transaction transaction)
+        throws IOException {
+        rmImpl.abortWriteTransaction(transaction);
+    }
 
-  @Override
-  public TableSnapshot createSnapshot(String tableName) throws IOException {
-    return rmImpl.createSnapshot(tableName);
-  }
+    @Override
+    public TableSnapshot createSnapshot(String tableName) throws IOException {
+        return rmImpl.createSnapshot(tableName);
+    }
 
-  @Override
-  public TableSnapshot createSnapshot(String tableName, long revision)
-      throws IOException {
-    return rmImpl.createSnapshot(tableName, revision);
-  }
+    @Override
+    public TableSnapshot createSnapshot(String tableName, long revision)
+        throws IOException {
+        return rmImpl.createSnapshot(tableName, revision);
+    }
 
-  @Override
-  public void keepAlive(Transaction transaction) throws IOException {
-    rmImpl.keepAlive(transaction);
-  }
+    @Override
+    public void keepAlive(Transaction transaction) throws IOException {
+        rmImpl.keepAlive(transaction);
+    }
 
-  @Override
-  public List<FamilyRevision> getAbortedWriteTransactions(String table,
-      String columnFamily) throws IOException {
-    return rmImpl.getAbortedWriteTransactions(table, columnFamily);
-  }
+    @Override
+    public List<FamilyRevision> getAbortedWriteTransactions(String table,
+                                                            String columnFamily) throws IOException {
+        return rmImpl.getAbortedWriteTransactions(table, columnFamily);
+    }
 
 }
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerEndpointClient.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerEndpointClient.java
index b2c026e..ee26242 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerEndpointClient.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerEndpointClient.java
@@ -33,92 +33,92 @@
  */
 public class RevisionManagerEndpointClient implements RevisionManager, Configurable {
 
-  private Configuration conf = null;
-  private RevisionManager rmProxy;
+    private Configuration conf = null;
+    private RevisionManager rmProxy;
 
-  @Override
-  public Configuration getConf() {
-    return this.conf;
-  }
+    @Override
+    public Configuration getConf() {
+        return this.conf;
+    }
 
-  @Override
-  public void setConf(Configuration arg0) {
-    this.conf = arg0;
-  }
+    @Override
+    public void setConf(Configuration arg0) {
+        this.conf = arg0;
+    }
 
-  @Override
-  public void initialize(Configuration conf) {
-    // do nothing
-  }
+    @Override
+    public void initialize(Configuration conf) {
+        // do nothing
+    }
 
-  @Override
-  public void open() throws IOException {
-    // clone to adjust RPC settings unique to proxy
-    Configuration clonedConf = new Configuration(conf);
-    // conf.set("hbase.ipc.client.connect.max.retries", "0");
-    // conf.setInt(HConstants.HBASE_CLIENT_RPC_MAXATTEMPTS, 1);
-    clonedConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); // do not retry RPC
-    HTable table = new HTable(clonedConf, HConstants.ROOT_TABLE_NAME);
-    rmProxy = table.coprocessorProxy(RevisionManagerProtocol.class,
-        Bytes.toBytes("anyRow"));
-    rmProxy.open();
-  }
+    @Override
+    public void open() throws IOException {
+        // clone to adjust RPC settings unique to proxy
+        Configuration clonedConf = new Configuration(conf);
+        // conf.set("hbase.ipc.client.connect.max.retries", "0");
+        // conf.setInt(HConstants.HBASE_CLIENT_RPC_MAXATTEMPTS, 1);
+        clonedConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); // do not retry RPC
+        HTable table = new HTable(clonedConf, HConstants.ROOT_TABLE_NAME);
+        rmProxy = table.coprocessorProxy(RevisionManagerProtocol.class,
+            Bytes.toBytes("anyRow"));
+        rmProxy.open();
+    }
 
-  @Override
-  public void close() throws IOException {
-    rmProxy.close();
-  }
+    @Override
+    public void close() throws IOException {
+        rmProxy.close();
+    }
 
-  @Override
-  public void createTable(String table, List<String> columnFamilies) throws IOException {
-    rmProxy.createTable(table, columnFamilies);
-  }
+    @Override
+    public void createTable(String table, List<String> columnFamilies) throws IOException {
+        rmProxy.createTable(table, columnFamilies);
+    }
 
-  @Override
-  public void dropTable(String table) throws IOException {
-    rmProxy.dropTable(table);
-  }
+    @Override
+    public void dropTable(String table) throws IOException {
+        rmProxy.dropTable(table);
+    }
 
-  @Override
-  public Transaction beginWriteTransaction(String table, List<String> families) throws IOException {
-    return rmProxy.beginWriteTransaction(table, families);
-  }
+    @Override
+    public Transaction beginWriteTransaction(String table, List<String> families) throws IOException {
+        return rmProxy.beginWriteTransaction(table, families);
+    }
 
-  @Override
-  public Transaction beginWriteTransaction(String table, List<String> families, long keepAlive)
-      throws IOException {
-    return rmProxy.beginWriteTransaction(table, families, keepAlive);
-  }
+    @Override
+    public Transaction beginWriteTransaction(String table, List<String> families, long keepAlive)
+        throws IOException {
+        return rmProxy.beginWriteTransaction(table, families, keepAlive);
+    }
 
-  @Override
-  public void commitWriteTransaction(Transaction transaction) throws IOException {
-    rmProxy.commitWriteTransaction(transaction);
-  }
+    @Override
+    public void commitWriteTransaction(Transaction transaction) throws IOException {
+        rmProxy.commitWriteTransaction(transaction);
+    }
 
-  @Override
-  public void abortWriteTransaction(Transaction transaction) throws IOException {
-    rmProxy.abortWriteTransaction(transaction);
-  }
+    @Override
+    public void abortWriteTransaction(Transaction transaction) throws IOException {
+        rmProxy.abortWriteTransaction(transaction);
+    }
 
-  @Override
-  public List<FamilyRevision> getAbortedWriteTransactions(String table, String columnFamily)
-      throws IOException {
-    return rmProxy.getAbortedWriteTransactions(table, columnFamily);
-  }
+    @Override
+    public List<FamilyRevision> getAbortedWriteTransactions(String table, String columnFamily)
+        throws IOException {
+        return rmProxy.getAbortedWriteTransactions(table, columnFamily);
+    }
 
-  @Override
-  public TableSnapshot createSnapshot(String tableName) throws IOException {
-    return rmProxy.createSnapshot(tableName);
-  }
+    @Override
+    public TableSnapshot createSnapshot(String tableName) throws IOException {
+        return rmProxy.createSnapshot(tableName);
+    }
 
-  @Override
-  public TableSnapshot createSnapshot(String tableName, long revision) throws IOException {
-    return rmProxy.createSnapshot(tableName, revision);
-  }
+    @Override
+    public TableSnapshot createSnapshot(String tableName, long revision) throws IOException {
+        return rmProxy.createSnapshot(tableName, revision);
+    }
 
-  @Override
-  public void keepAlive(Transaction transaction) throws IOException {
-    rmProxy.keepAlive(transaction);
-  }
+    @Override
+    public void keepAlive(Transaction transaction) throws IOException {
+        rmProxy.keepAlive(transaction);
+    }
 
 }
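
The endpoint client above is itself a RevisionManager, so callers drive it the same way as a local implementation. A minimal sketch, assuming HBase and the RevisionManagerEndpoint coprocessor are already deployed; the table and column-family names are placeholders:

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hcatalog.hbase.snapshot.RevisionManagerEndpointClient;
import org.apache.hcatalog.hbase.snapshot.Transaction;

public class EndpointClientSketch {
    public static void main(String[] args) throws IOException {
        RevisionManagerEndpointClient client = new RevisionManagerEndpointClient();
        // setConf() supplies the HBase connection settings; open() builds the
        // coprocessor proxy (with RPC retries disabled) and opens the remote manager.
        client.setConf(HBaseConfiguration.create());
        client.open();
        try {
            // "demo_table" and "cf1" are illustrative names only.
            Transaction txn = client.beginWriteTransaction("demo_table", Arrays.asList("cf1"));
            client.commitWriteTransaction(txn);
        } finally {
            client.close();
        }
    }
}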
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerFactory.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerFactory.java
index b7cf1bf..bd50e2e 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerFactory.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/RevisionManagerFactory.java
@@ -28,77 +28,77 @@
  */
 public class RevisionManagerFactory {
 
-  public static final String REVISION_MGR_IMPL_CLASS = "revision.manager.impl.class";
+    public static final String REVISION_MGR_IMPL_CLASS = "revision.manager.impl.class";
 
-  /**
-   * Gets an instance of revision manager.
-   *
-   * @param conf The configuration required to created the revision manager.
-   * @return the revision manager An instance of revision manager.
-   * @throws IOException Signals that an I/O exception has occurred.
-   */
-   private static RevisionManager getRevisionManager(String className, Configuration conf) throws IOException{
+    /**
+     * Gets an instance of revision manager.
+     *
+     * @param conf The configuration required to create the revision manager.
+     * @return An instance of the revision manager.
+     * @throws IOException Signals that an I/O exception has occurred.
+     */
+    private static RevisionManager getRevisionManager(String className, Configuration conf) throws IOException {
 
         RevisionManager revisionMgr;
         ClassLoader classLoader = Thread.currentThread()
-                .getContextClassLoader();
+            .getContextClassLoader();
         if (classLoader == null) {
             classLoader = RevisionManagerFactory.class.getClassLoader();
         }
         try {
             Class<? extends RevisionManager> revisionMgrClass = Class
-                    .forName(className, true , classLoader).asSubclass(RevisionManager.class);
+                .forName(className, true, classLoader).asSubclass(RevisionManager.class);
             revisionMgr = (RevisionManager) revisionMgrClass.newInstance();
             revisionMgr.initialize(conf);
         } catch (ClassNotFoundException e) {
             throw new IOException(
-                    "The implementation class of revision manager not found.",
-                    e);
+                "The implementation class of revision manager not found.",
+                e);
         } catch (InstantiationException e) {
             throw new IOException(
-                    "Exception encountered during instantiating revision manager implementation.",
-                    e);
+                "Exception encountered during instantiating revision manager implementation.",
+                e);
         } catch (IllegalAccessException e) {
             throw new IOException(
-                    "IllegalAccessException encountered during instantiating revision manager implementation.",
-                    e);
+                "IllegalAccessException encountered during instantiating revision manager implementation.",
+                e);
         } catch (IllegalArgumentException e) {
             throw new IOException(
-                    "IllegalArgumentException encountered during instantiating revision manager implementation.",
-                    e);
+                "IllegalArgumentException encountered during instantiating revision manager implementation.",
+                e);
         }
         return revisionMgr;
     }
 
-   /**
-    * Internally used by endpoint implementation to instantiate from different configuration setting.
-    * @param className
-    * @param conf
-    * @return
-    * @throws IOException
-    */
-   static RevisionManager getOpenedRevisionManager(String className, Configuration conf) throws IOException {
+    /**
+     * Internally used by the endpoint implementation to instantiate from a different configuration setting.
+     * @param className
+     * @param conf
+     * @return
+     * @throws IOException
+     */
+    static RevisionManager getOpenedRevisionManager(String className, Configuration conf) throws IOException {
 
-       RevisionManager revisionMgr = RevisionManagerFactory.getRevisionManager(className, conf);
-       if (revisionMgr instanceof Configurable) {
-         ((Configurable)revisionMgr).setConf(conf);
-       }
-       revisionMgr.open();
-       return revisionMgr;
-   }
+        RevisionManager revisionMgr = RevisionManagerFactory.getRevisionManager(className, conf);
+        if (revisionMgr instanceof Configurable) {
+            ((Configurable) revisionMgr).setConf(conf);
+        }
+        revisionMgr.open();
+        return revisionMgr;
+    }
 
-   /**
-    * Gets an instance of revision manager which is opened.
-    * The revision manager implementation can be specified as {@link #REVISION_MGR_IMPL_CLASS},
-    * default is {@link ZKBasedRevisionManager}.
-    * @param conf revision manager configuration
-    * @return RevisionManager An instance of revision manager.
-    * @throws IOException
-    */
-   public static RevisionManager getOpenedRevisionManager(Configuration conf) throws IOException {
-     String className = conf.get(RevisionManagerFactory.REVISION_MGR_IMPL_CLASS,
-         ZKBasedRevisionManager.class.getName());
-     return getOpenedRevisionManager(className, conf);
-   }
+    /**
+     * Gets an opened instance of the revision manager.
+     * The revision manager implementation can be specified as {@link #REVISION_MGR_IMPL_CLASS},
+     * default is {@link ZKBasedRevisionManager}.
+     * @param conf revision manager configuration
+     * @return An opened RevisionManager instance.
+     * @throws IOException
+     */
+    public static RevisionManager getOpenedRevisionManager(Configuration conf) throws IOException {
+        String className = conf.get(RevisionManagerFactory.REVISION_MGR_IMPL_CLASS,
+            ZKBasedRevisionManager.class.getName());
+        return getOpenedRevisionManager(className, conf);
+    }
 
 }
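
A short sketch (again not part of the patch) of obtaining an opened revision manager through the factory shown above; setting REVISION_MGR_IMPL_CLASS explicitly is optional here, since ZKBasedRevisionManager is already the default:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hcatalog.hbase.snapshot.RevisionManager;
import org.apache.hcatalog.hbase.snapshot.RevisionManagerConfiguration;
import org.apache.hcatalog.hbase.snapshot.RevisionManagerFactory;
import org.apache.hcatalog.hbase.snapshot.ZKBasedRevisionManager;

public class RevisionManagerFactorySketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = RevisionManagerConfiguration.create();
        // Pick the ZooKeeper-based implementation; omitting this line has the same effect.
        conf.set(RevisionManagerFactory.REVISION_MGR_IMPL_CLASS,
            ZKBasedRevisionManager.class.getName());

        // getOpenedRevisionManager() instantiates the class, calls initialize(conf),
        // pushes conf through setConf() when the implementation is Configurable,
        // and finally open()s it.
        RevisionManager rm = RevisionManagerFactory.getOpenedRevisionManager(conf);
        try {
            System.out.println("Opened " + rm.getClass().getName());
        } finally {
            rm.close();
        }
    }
}

The same factory path is what RevisionManagerEndpoint uses on the region server, just keyed off REVISION_MGR_ENDPOINT_IMPL_CLASS instead.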
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/TableSnapshot.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/TableSnapshot.java
index b6eeafc..e79390a 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/TableSnapshot.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/TableSnapshot.java
@@ -37,7 +37,7 @@
     public TableSnapshot(String name, Map<String, Long> cfRevMap, long latestRevision) {
         this.name = name;
         if (cfRevMap == null) {
-          throw new IllegalArgumentException("revision map cannot be null");
+            throw new IllegalArgumentException("revision map cannot be null");
         }
         this.cfRevisionMap = cfRevMap;
         this.latestRevision = latestRevision;
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/Transaction.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/Transaction.java
index 70f6a2e..e2a3682 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/Transaction.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/Transaction.java
@@ -41,35 +41,35 @@
         this.revision = revision;
     }
 
-   /**
+    /**
      * @return The revision number associated with a transaction.
      */
-   public long getRevisionNumber(){
-       return this.revision;
-   }
+    public long getRevisionNumber() {
+        return this.revision;
+    }
 
     /**
      * @return The table name associated with a transaction.
      */
-   public String getTableName() {
+    public String getTableName() {
         return tableName;
     }
 
     /**
      * @return The column families associated with a transaction.
      */
-   public List<String> getColumnFamilies() {
+    public List<String> getColumnFamilies() {
         return columnFamilies;
     }
 
     /**
      * @return The expire timestamp associated with a transaction.
      */
-   long getTransactionExpireTimeStamp(){
+    long getTransactionExpireTimeStamp() {
         return this.timeStamp + this.keepAlive;
     }
 
-    void setKeepAlive(long seconds){
+    void setKeepAlive(long seconds) {
         this.keepAlive = seconds;
     }
 
@@ -78,7 +78,7 @@
      *
      * @return long  The keep alive value for the transaction.
      */
-    public long getKeepAliveValue(){
+    public long getKeepAliveValue() {
         return this.keepAlive;
     }
 
@@ -87,15 +87,15 @@
      *
      * @return FamilyRevision An instance of FamilyRevision associated with the transaction.
      */
-    FamilyRevision getFamilyRevisionInfo(){
+    FamilyRevision getFamilyRevisionInfo() {
         return new FamilyRevision(revision, getTransactionExpireTimeStamp());
     }
 
-   /**
-    * Keep alive transaction. This methods extends the expire timestamp of a
-    * transaction by the "keep alive" amount.
-    */
-   void keepAliveTransaction(){
+    /**
+     * Keep alive transaction. This method extends the expire timestamp of a
+     * transaction by the "keep alive" amount.
+     */
+    void keepAliveTransaction() {
         this.timeStamp = this.timeStamp + this.keepAlive;
     }
 
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/ZKBasedRevisionManager.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/ZKBasedRevisionManager.java
index 5885517..50adc9f 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/ZKBasedRevisionManager.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/ZKBasedRevisionManager.java
@@ -35,7 +35,7 @@
 /**
  * The service for providing revision management to Hbase tables.
  */
-public class ZKBasedRevisionManager implements RevisionManager{
+public class ZKBasedRevisionManager implements RevisionManager {
 
     private static final Logger LOG = LoggerFactory.getLogger(ZKBasedRevisionManager.class);
     private String zkHostList;
@@ -51,19 +51,19 @@
     public void initialize(Configuration conf) {
         conf = new Configuration(conf);
         if (conf.get(RMConstants.ZOOKEEPER_HOSTLIST) == null) {
-           String zkHostList = conf.get(HConstants.ZOOKEEPER_QUORUM);
-           int port = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT,
-                   HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
-           String[] splits = zkHostList.split(",");
-           StringBuffer sb = new StringBuffer();
-           for (String split : splits) {
-               sb.append(split);
-               sb.append(':');
-               sb.append(port);
-               sb.append(',');
-           }
-           sb.deleteCharAt(sb.length() - 1);
-           conf.set(RMConstants.ZOOKEEPER_HOSTLIST, sb.toString());
+            String zkHostList = conf.get(HConstants.ZOOKEEPER_QUORUM);
+            int port = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT,
+                HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
+            String[] splits = zkHostList.split(",");
+            StringBuffer sb = new StringBuffer();
+            for (String split : splits) {
+                sb.append(split);
+                sb.append(':');
+                sb.append(port);
+                sb.append(',');
+            }
+            sb.deleteCharAt(sb.length() - 1);
+            conf.set(RMConstants.ZOOKEEPER_HOSTLIST, sb.toString());
         }
         this.zkHostList = conf.get(RMConstants.ZOOKEEPER_HOSTLIST);
         this.baseDir = conf.get(RMConstants.ZOOKEEPER_DATADIR);
@@ -91,11 +91,11 @@
     private void checkInputParams(String table, List<String> families) {
         if (table == null) {
             throw new IllegalArgumentException(
-                    "The table name must be specified for reading.");
+                "The table name must be specified for reading.");
         }
         if (families == null || families.isEmpty()) {
             throw new IllegalArgumentException(
-                    "At least one column family should be specified for reading.");
+                "At least one column family should be specified for reading.");
         }
     }
 
@@ -118,14 +118,14 @@
      * @see org.apache.hcatalog.hbase.snapshot.RevisionManager#beginWriteTransaction(java.lang.String, java.util.List, long)
      */
     public Transaction beginWriteTransaction(String table,
-            List<String> families, long keepAlive) throws IOException {
+                                             List<String> families, long keepAlive) throws IOException {
 
         checkInputParams(table, families);
         zkUtil.setUpZnodesForTable(table, families);
         long nextId = zkUtil.nextId(table);
         long expireTimestamp = zkUtil.getTimeStamp();
         Transaction transaction = new Transaction(table, families, nextId,
-                expireTimestamp);
+            expireTimestamp);
         if (keepAlive != -1) {
             transaction.setKeepAlive(keepAlive);
         } else {
@@ -135,32 +135,31 @@
         refreshTransactionList(transaction.getTableName());
         String lockPath = prepareLockNode(table);
         WriteLock wLock = new WriteLock(zkUtil.getSession(), lockPath,
-                Ids.OPEN_ACL_UNSAFE);
+            Ids.OPEN_ACL_UNSAFE);
         RMLockListener myLockListener = new RMLockListener();
         wLock.setLockListener(myLockListener);
         try {
             boolean lockGrabbed = wLock.lock();
             if (lockGrabbed == false) {
-              //TO DO : Let this request queue up and try obtaining lock.
+                //TO DO : Let this request queue up and try obtaining lock.
                 throw new IOException(
-                        "Unable to obtain lock while beginning transaction. "
-                                + transaction.toString());
+                    "Unable to obtain lock while beginning transaction. "
+                        + transaction.toString());
             } else {
                 List<String> colFamilies = transaction.getColumnFamilies();
                 FamilyRevision revisionData = transaction.getFamilyRevisionInfo();
                 for (String cfamily : colFamilies) {
                     String path = PathUtil.getRunningTxnInfoPath(
-                            baseDir, table, cfamily);
+                        baseDir, table, cfamily);
                     zkUtil.updateData(path, revisionData,
-                            ZKUtil.UpdateMode.APPEND);
+                        ZKUtil.UpdateMode.APPEND);
                 }
             }
         } catch (KeeperException e) {
             throw new IOException("Exception while obtaining lock.", e);
         } catch (InterruptedException e) {
             throw new IOException("Exception while obtaining lock.", e);
-        }
-        finally {
+        } finally {
             wLock.unlock();
         }
 
@@ -174,7 +173,7 @@
      * @see org.apache.hcatalog.hbase.snapshot.RevisionManager#beginWriteTransaction(java.lang.String, java.util.List)
      */
     public Transaction beginWriteTransaction(String table, List<String> families)
-            throws IOException {
+        throws IOException {
         return beginWriteTransaction(table, families, -1);
     }
 
@@ -188,25 +187,25 @@
 
         String lockPath = prepareLockNode(transaction.getTableName());
         WriteLock wLock = new WriteLock(zkUtil.getSession(), lockPath,
-                Ids.OPEN_ACL_UNSAFE);
+            Ids.OPEN_ACL_UNSAFE);
         RMLockListener myLockListener = new RMLockListener();
         wLock.setLockListener(myLockListener);
         try {
             boolean lockGrabbed = wLock.lock();
             if (lockGrabbed == false) {
-              //TO DO : Let this request queue up and try obtaining lock.
+                //TO DO : Let this request queue up and try obtaining lock.
                 throw new IOException(
-                        "Unable to obtain lock while commiting transaction. "
-                                + transaction.toString());
+                    "Unable to obtain lock while committing transaction. "
+                        + transaction.toString());
             } else {
                 String tableName = transaction.getTableName();
                 List<String> colFamilies = transaction.getColumnFamilies();
                 FamilyRevision revisionData = transaction.getFamilyRevisionInfo();
                 for (String cfamily : colFamilies) {
                     String path = PathUtil.getRunningTxnInfoPath(
-                            baseDir, tableName, cfamily);
+                        baseDir, tableName, cfamily);
                     zkUtil.updateData(path, revisionData,
-                            ZKUtil.UpdateMode.REMOVE);
+                        ZKUtil.UpdateMode.REMOVE);
                 }
 
             }
@@ -214,8 +213,7 @@
             throw new IOException("Exception while obtaining lock.", e);
         } catch (InterruptedException e) {
             throw new IOException("Exception while obtaining lock.", e);
-        }
-        finally {
+        } finally {
             wLock.unlock();
         }
         LOG.info("Write Transaction committed: " + transaction.toString());
@@ -231,30 +229,30 @@
         refreshTransactionList(transaction.getTableName());
         String lockPath = prepareLockNode(transaction.getTableName());
         WriteLock wLock = new WriteLock(zkUtil.getSession(), lockPath,
-                Ids.OPEN_ACL_UNSAFE);
+            Ids.OPEN_ACL_UNSAFE);
         RMLockListener myLockListener = new RMLockListener();
         wLock.setLockListener(myLockListener);
         try {
             boolean lockGrabbed = wLock.lock();
             if (lockGrabbed == false) {
-              //TO DO : Let this request queue up and try obtaining lock.
+                //TO DO : Let this request queue up and try obtaining lock.
                 throw new IOException(
-                        "Unable to obtain lock while aborting transaction. "
-                                + transaction.toString());
+                    "Unable to obtain lock while aborting transaction. "
+                        + transaction.toString());
             } else {
                 String tableName = transaction.getTableName();
                 List<String> colFamilies = transaction.getColumnFamilies();
                 FamilyRevision revisionData = transaction
-                        .getFamilyRevisionInfo();
+                    .getFamilyRevisionInfo();
                 for (String cfamily : colFamilies) {
                     String path = PathUtil.getRunningTxnInfoPath(
-                            baseDir, tableName, cfamily);
+                        baseDir, tableName, cfamily);
                     zkUtil.updateData(path, revisionData,
-                            ZKUtil.UpdateMode.REMOVE);
+                        ZKUtil.UpdateMode.REMOVE);
                     path = PathUtil.getAbortInformationPath(baseDir,
-                            tableName, cfamily);
+                        tableName, cfamily);
                     zkUtil.updateData(path, revisionData,
-                            ZKUtil.UpdateMode.APPEND);
+                        ZKUtil.UpdateMode.APPEND);
                 }
 
             }
@@ -262,54 +260,53 @@
             throw new IOException("Exception while obtaining lock.", e);
         } catch (InterruptedException e) {
             throw new IOException("Exception while obtaining lock.", e);
-        }
-        finally {
+        } finally {
             wLock.unlock();
         }
         LOG.info("Write Transaction aborted: " + transaction.toString());
     }
 
 
-     /* @param transaction
-     /* @throws IOException
-      * @see org.apache.hcatalog.hbase.snapshot.RevsionManager#keepAlive(org.apache.hcatalog.hbase.snapshot.Transaction)
-      */
-     public void keepAlive(Transaction transaction)
-            throws IOException {
+    /* @param transaction
+     * @throws IOException
+     * @see org.apache.hcatalog.hbase.snapshot.RevisionManager#keepAlive(org.apache.hcatalog.hbase.snapshot.Transaction)
+     */
+    public void keepAlive(Transaction transaction)
+        throws IOException {
 
-         refreshTransactionList(transaction.getTableName());
-         transaction.keepAliveTransaction();
-         String lockPath = prepareLockNode(transaction.getTableName());
-         WriteLock wLock = new WriteLock(zkUtil.getSession(), lockPath,
-                 Ids.OPEN_ACL_UNSAFE);
-         RMLockListener myLockListener = new RMLockListener();
-         wLock.setLockListener(myLockListener);
-         try {
-             boolean lockGrabbed = wLock.lock();
-             if (lockGrabbed == false) {
-               //TO DO : Let this request queue up and try obtaining lock.
-                 throw new IOException(
-                         "Unable to obtain lock for keep alive of transaction. "
-                                 + transaction.toString());
-             }else {
-                 String tableName = transaction.getTableName();
-                 List<String> colFamilies = transaction.getColumnFamilies();
-                 FamilyRevision revisionData = transaction.getFamilyRevisionInfo();
-                 for (String cfamily : colFamilies) {
-                     String path = PathUtil.getRunningTxnInfoPath(
-                             baseDir, tableName, cfamily);
-                     zkUtil.updateData(path, revisionData,
-                             ZKUtil.UpdateMode.KEEP_ALIVE);
-                 }
+        refreshTransactionList(transaction.getTableName());
+        transaction.keepAliveTransaction();
+        String lockPath = prepareLockNode(transaction.getTableName());
+        WriteLock wLock = new WriteLock(zkUtil.getSession(), lockPath,
+            Ids.OPEN_ACL_UNSAFE);
+        RMLockListener myLockListener = new RMLockListener();
+        wLock.setLockListener(myLockListener);
+        try {
+            boolean lockGrabbed = wLock.lock();
+            if (lockGrabbed == false) {
+                //TO DO : Let this request queue up and try obtaining lock.
+                throw new IOException(
+                    "Unable to obtain lock for keep alive of transaction. "
+                        + transaction.toString());
+            } else {
+                String tableName = transaction.getTableName();
+                List<String> colFamilies = transaction.getColumnFamilies();
+                FamilyRevision revisionData = transaction.getFamilyRevisionInfo();
+                for (String cfamily : colFamilies) {
+                    String path = PathUtil.getRunningTxnInfoPath(
+                        baseDir, tableName, cfamily);
+                    zkUtil.updateData(path, revisionData,
+                        ZKUtil.UpdateMode.KEEP_ALIVE);
+                }
 
-             }
-         } catch (KeeperException e) {
-             throw new IOException("Exception while obtaining lock.", e);
-         } catch (InterruptedException e) {
-             throw new IOException("Exception while obtaining lock.", e);
-         }finally {
-             wLock.unlock();
-         }
+            }
+        } catch (KeeperException e) {
+            throw new IOException("Exception while obtaining lock.", e);
+        } catch (InterruptedException e) {
+            throw new IOException("Exception while obtaining lock.", e);
+        } finally {
+            wLock.unlock();
+        }
 
     }
 
@@ -320,13 +317,13 @@
     /* @throws IOException
      * @see org.apache.hcatalog.hbase.snapshot.RevsionManager#createSnapshot(java.lang.String)
      */
-    public TableSnapshot createSnapshot(String tableName) throws IOException{
+    public TableSnapshot createSnapshot(String tableName) throws IOException {
         refreshTransactionList(tableName);
         long latestID = zkUtil.currentID(tableName);
         HashMap<String, Long> cfMap = new HashMap<String, Long>();
         List<String> columnFamilyNames = zkUtil.getColumnFamiliesOfTable(tableName);
 
-        for(String cfName: columnFamilyNames){
+        for (String cfName : columnFamilyNames) {
             String cfPath = PathUtil.getRunningTxnInfoPath(baseDir, tableName, cfName);
             List<FamilyRevision> tranxList = zkUtil.getTransactionList(cfPath);
             long version;
@@ -334,15 +331,15 @@
                 Collections.sort(tranxList);
                 // get the smallest running Transaction ID
                 long runningVersion = tranxList.get(0).getRevision();
-                version = runningVersion -1;
+                version = runningVersion - 1;
             } else {
                 version = latestID;
             }
             cfMap.put(cfName, version);
         }
 
-        TableSnapshot snapshot = new TableSnapshot(tableName, cfMap,latestID);
-        LOG.debug("Created snapshot For table: "+tableName+" snapshot: "+snapshot);
+        TableSnapshot snapshot = new TableSnapshot(tableName, cfMap, latestID);
+        LOG.debug("Created snapshot For table: " + tableName + " snapshot: " + snapshot);
         return snapshot;
     }
 
@@ -354,18 +351,18 @@
     /* @throws IOException
      * @see org.apache.hcatalog.hbase.snapshot.RevsionManager#createSnapshot(java.lang.String, long)
      */
-    public TableSnapshot createSnapshot(String tableName, long revision) throws IOException{
+    public TableSnapshot createSnapshot(String tableName, long revision) throws IOException {
 
         long currentID = zkUtil.currentID(tableName);
         if (revision > currentID) {
             throw new IOException(
-                    "The revision specified in the snapshot is higher than the current revision of the table.");
+                "The revision specified in the snapshot is higher than the current revision of the table.");
         }
         refreshTransactionList(tableName);
         HashMap<String, Long> cfMap = new HashMap<String, Long>();
         List<String> columnFamilies = zkUtil.getColumnFamiliesOfTable(tableName);
 
-        for(String cf: columnFamilies){
+        for (String cf : columnFamilies) {
             cfMap.put(cf, revision);
         }
 
@@ -380,40 +377,40 @@
      * @throws java.io.IOException
      */
     List<FamilyRevision> getRunningTransactions(String table,
-            String columnFamily) throws IOException {
+                                                String columnFamily) throws IOException {
         String path = PathUtil.getRunningTxnInfoPath(baseDir, table,
-                columnFamily);
+            columnFamily);
         return zkUtil.getTransactionList(path);
     }
 
     @Override
-     public List<FamilyRevision> getAbortedWriteTransactions(String table,
-            String columnFamily) throws IOException {
-         String path = PathUtil.getAbortInformationPath(baseDir, table, columnFamily);
-         return zkUtil.getTransactionList(path);
+    public List<FamilyRevision> getAbortedWriteTransactions(String table,
+                                                            String columnFamily) throws IOException {
+        String path = PathUtil.getAbortInformationPath(baseDir, table, columnFamily);
+        return zkUtil.getTransactionList(path);
     }
 
-     private void refreshTransactionList(String tableName) throws IOException{
+    private void refreshTransactionList(String tableName) throws IOException {
         String lockPath = prepareLockNode(tableName);
         WriteLock wLock = new WriteLock(zkUtil.getSession(), lockPath,
-                Ids.OPEN_ACL_UNSAFE);
+            Ids.OPEN_ACL_UNSAFE);
         RMLockListener myLockListener = new RMLockListener();
         wLock.setLockListener(myLockListener);
         try {
             boolean lockGrabbed = wLock.lock();
             if (lockGrabbed == false) {
-              //TO DO : Let this request queue up and try obtaining lock.
+                //TODO: Let this request queue up and try obtaining lock.
                 throw new IOException(
-                        "Unable to obtain lock while refreshing transactions of table "
-                                + tableName + ".");
-            }else {
+                    "Unable to obtain lock while refreshing transactions of table "
+                        + tableName + ".");
+            } else {
                 List<String> cfPaths = zkUtil
-                        .getColumnFamiliesOfTable(tableName);
+                    .getColumnFamiliesOfTable(tableName);
                 for (String cf : cfPaths) {
                     String runningDataPath = PathUtil.getRunningTxnInfoPath(
-                            baseDir, tableName, cf);
+                        baseDir, tableName, cf);
                     zkUtil.refreshTransactions(runningDataPath);
-        }
+                }
 
             }
         } catch (KeeperException e) {
@@ -424,22 +421,22 @@
             wLock.unlock();
         }
 
-     }
+    }
 
-     private String prepareLockNode(String tableName) throws IOException{
-         String txnDataPath = PathUtil.getTxnDataPath(this.baseDir, tableName);
-         String lockPath = PathUtil.getLockManagementNode(txnDataPath);
-         zkUtil.ensurePathExists(lockPath, null, Ids.OPEN_ACL_UNSAFE,
-                 CreateMode.PERSISTENT);
-         return lockPath;
-     }
+    private String prepareLockNode(String tableName) throws IOException {
+        String txnDataPath = PathUtil.getTxnDataPath(this.baseDir, tableName);
+        String lockPath = PathUtil.getLockManagementNode(txnDataPath);
+        zkUtil.ensurePathExists(lockPath, null, Ids.OPEN_ACL_UNSAFE,
+            CreateMode.PERSISTENT);
+        return lockPath;
+    }
 
     /*
      * This class is a listener class for the locks used in revision management.
      * TBD: Use the following class to signal that that the lock is actually
      * been granted.
      */
-     class RMLockListener implements LockListener {
+    class RMLockListener implements LockListener {
 
         /*
          * @see org.apache.hcatalog.hbase.snapshot.lock.LockListener#lockAcquired()
@@ -457,7 +454,7 @@
 
         }
 
-     }
+    }
 
 
 }
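For readers scanning the reformatted ZKBasedRevisionManager above: refreshTransactionList follows a lock / do-work / unlock-in-finally shape around the ZooKeeper-backed WriteLock. The sketch below restates that shape using only the WriteLock API that appears later in this patch; the class name DemoRefresher, its parameters, and the simplified messages are illustrative assumptions, not part of the change.

import java.io.IOException;
import java.util.List;

import org.apache.hcatalog.hbase.snapshot.lock.WriteLock;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: mirrors the lock / refresh / unlock shape of refreshTransactionList above.
class DemoRefresher {

    // zk, lockPath and txnPaths are assumed to be supplied by the caller.
    void refresh(ZooKeeper zk, String lockPath, List<String> txnPaths) throws IOException {
        WriteLock wLock = new WriteLock(zk, lockPath, Ids.OPEN_ACL_UNSAFE);
        try {
            if (!wLock.lock()) {
                // Fail fast when the lock is not granted, as the patched code does.
                throw new IOException("Unable to obtain lock.");
            }
            for (String path : txnPaths) {
                // In the real class this step is zkUtil.refreshTransactions(path).
            }
        } catch (KeeperException e) {
            throw new IOException("Exception while obtaining lock.", e);
        } catch (InterruptedException e) {
            throw new IOException("Exception while obtaining lock.", e);
        } finally {
            // The lock is always released, even when refreshing fails.
            wLock.unlock();
        }
    }
}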
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/ZKUtil.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/ZKUtil.java
index 8837bb5..de18241 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/ZKUtil.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/ZKUtil.java
@@ -43,15 +43,17 @@
 
 class ZKUtil {
 
-    private int              DEFAULT_SESSION_TIMEOUT = 1000000;
-    private ZooKeeper        zkSession;
-    private String           baseDir;
-    private String           connectString;
+    private int DEFAULT_SESSION_TIMEOUT = 1000000;
+    private ZooKeeper zkSession;
+    private String baseDir;
+    private String connectString;
     private static final Logger LOG = LoggerFactory.getLogger(ZKUtil.class);
 
     static enum UpdateMode {
         APPEND, REMOVE, KEEP_ALIVE
-    };
+    }
+
+    ;
 
     ZKUtil(String connection, String baseDir) {
         this.connectString = connection;
@@ -66,20 +68,20 @@
      * @throws IOException
      */
     void setUpZnodesForTable(String table, List<String> families)
-            throws IOException {
+        throws IOException {
 
         String transactionDataTablePath = PathUtil.getTxnDataPath(baseDir, table);
         ensurePathExists(transactionDataTablePath, null, Ids.OPEN_ACL_UNSAFE,
-                CreateMode.PERSISTENT);
+            CreateMode.PERSISTENT);
         for (String cf : families) {
             String runningDataPath = PathUtil.getRunningTxnInfoPath(
-                    this.baseDir, table, cf);
+                this.baseDir, table, cf);
             ensurePathExists(runningDataPath, null, Ids.OPEN_ACL_UNSAFE,
-                    CreateMode.PERSISTENT);
+                CreateMode.PERSISTENT);
             String abortDataPath = PathUtil.getAbortInformationPath(
-                    this.baseDir, table, cf);
+                this.baseDir, table, cf);
             ensurePathExists(abortDataPath, null, Ids.OPEN_ACL_UNSAFE,
-                    CreateMode.PERSISTENT);
+                CreateMode.PERSISTENT);
         }
 
     }
@@ -95,7 +97,7 @@
      * @throws IOException
      */
     void ensurePathExists(String path, byte[] data, List<ACL> acl,
-            CreateMode flags) throws IOException {
+                          CreateMode flags) throws IOException {
         String[] dirs = path.split("/");
         String parentPath = "";
         for (String subDir : dirs) {
@@ -108,7 +110,7 @@
                     }
                 } catch (Exception e) {
                     throw new IOException("Exception while creating path "
-                            + parentPath, e);
+                        + parentPath, e);
                 }
             }
         }
@@ -131,15 +133,15 @@
             children = getSession().getChildren(path, false);
         } catch (KeeperException e) {
             LOG.warn("Caught: ", e);
-            throw new IOException("Exception while obtaining columns of table.",e);
+            throw new IOException("Exception while obtaining columns of table.", e);
         } catch (InterruptedException e) {
             LOG.warn("Caught: ", e);
-            throw new IOException("Exception while obtaining columns of table.",e);
+            throw new IOException("Exception while obtaining columns of table.", e);
         }
 
         for (String child : children) {
             if ((child.contains("idgen") == false)
-                    && (child.contains("_locknode_") == false)) {
+                && (child.contains("_locknode_") == false)) {
                 columnFamlies.add(child);
             }
         }
@@ -157,7 +159,7 @@
         Stat stat;
         String clockPath = PathUtil.getClockPath(this.baseDir);
         ensurePathExists(clockPath, null, Ids.OPEN_ACL_UNSAFE,
-                CreateMode.PERSISTENT);
+            CreateMode.PERSISTENT);
         try {
             getSession().exists(clockPath, false);
             stat = getSession().setData(clockPath, null, -1);
@@ -184,10 +186,10 @@
     long nextId(String tableName) throws IOException {
         String idNode = PathUtil.getRevisionIDNode(this.baseDir, tableName);
         ensurePathExists(idNode, Bytes.toBytes("0"), Ids.OPEN_ACL_UNSAFE,
-                CreateMode.PERSISTENT);
+            CreateMode.PERSISTENT);
         String lockNode = PathUtil.getLockManagementNode(idNode);
         ensurePathExists(lockNode, null, Ids.OPEN_ACL_UNSAFE,
-                CreateMode.PERSISTENT);
+            CreateMode.PERSISTENT);
         IDGenerator idf = new IDGenerator(getSession(), tableName, idNode);
         long id = idf.obtainID();
         return id;
@@ -200,13 +202,13 @@
      * @return the long The revision number to use by any transaction.
      * @throws IOException Signals that an I/O exception has occurred.
      */
-    long currentID(String tableName) throws IOException{
+    long currentID(String tableName) throws IOException {
         String idNode = PathUtil.getRevisionIDNode(this.baseDir, tableName);
         ensurePathExists(idNode, Bytes.toBytes("0"), Ids.OPEN_ACL_UNSAFE,
-                CreateMode.PERSISTENT);
+            CreateMode.PERSISTENT);
         String lockNode = PathUtil.getLockManagementNode(idNode);
         ensurePathExists(lockNode, null, Ids.OPEN_ACL_UNSAFE,
-                CreateMode.PERSISTENT);
+            CreateMode.PERSISTENT);
         IDGenerator idf = new IDGenerator(getSession(), tableName, idNode);
         long id = idf.readID();
         return id;
@@ -221,7 +223,7 @@
      * @throws IOException
      */
     List<FamilyRevision> getTransactionList(String path)
-            throws IOException {
+        throws IOException {
 
         byte[] data = getRawData(path, new Stat());
         ArrayList<FamilyRevision> wtxnList = new ArrayList<FamilyRevision>();
@@ -235,7 +237,7 @@
         while (itr.hasNext()) {
             StoreFamilyRevision wtxn = itr.next();
             wtxnList.add(new FamilyRevision(wtxn.getRevision(), wtxn
-                    .getTimestamp()));
+                .getTimestamp()));
         }
 
         return wtxnList;
@@ -255,8 +257,8 @@
             data = getSession().getData(path, false, stat);
         } catch (Exception e) {
             throw new IOException(
-                    "Exception while obtaining raw data from zookeeper path "
-                            + path, e);
+                "Exception while obtaining raw data from zookeeper path "
+                    + path, e);
         }
         return data;
     }
@@ -271,9 +273,9 @@
         String txnBaseNode = PathUtil.getTransactionBasePath(this.baseDir);
         String clockNode = PathUtil.getClockPath(this.baseDir);
         ensurePathExists(txnBaseNode, null, Ids.OPEN_ACL_UNSAFE,
-                CreateMode.PERSISTENT);
+            CreateMode.PERSISTENT);
         ensurePathExists(clockNode, null, Ids.OPEN_ACL_UNSAFE,
-                CreateMode.PERSISTENT);
+            CreateMode.PERSISTENT);
     }
 
     /**
@@ -298,12 +300,12 @@
      * @return ZooKeeper An instance of zookeeper client.
      * @throws IOException
      */
-     ZooKeeper getSession() throws IOException {
+    ZooKeeper getSession() throws IOException {
         if (zkSession == null || zkSession.getState() == States.CLOSED) {
             synchronized (this) {
                 if (zkSession == null || zkSession.getState() == States.CLOSED) {
                     zkSession = new ZooKeeper(this.connectString,
-                            this.DEFAULT_SESSION_TIMEOUT, new ZKWatcher());
+                        this.DEFAULT_SESSION_TIMEOUT, new ZKWatcher());
                 }
             }
         }
@@ -319,11 +321,11 @@
      * @throws IOException
      */
     void updateData(String path, FamilyRevision updateTx, UpdateMode mode)
-            throws IOException {
+        throws IOException {
 
         if (updateTx == null) {
             throw new IOException(
-                    "The transaction to be updated found to be null.");
+                "The transaction to be updated found to be null.");
         }
         List<FamilyRevision> currentData = getTransactionList(path);
         List<FamilyRevision> newData = new ArrayList<FamilyRevision>();
@@ -337,36 +339,36 @@
             }
         }
         switch (mode) {
-            case REMOVE:
-                if (dataFound == false) {
-                    throw new IOException(
-                            "The transaction to be removed not found in the data.");
-                }
-                LOG.info("Removed trasaction : " + updateTx.toString());
-                break;
-            case KEEP_ALIVE:
-                if (dataFound == false) {
-                    throw new IOException(
-                            "The transaction to be kept alove not found in the data. It might have been expired.");
-                }
-                newData.add(updateTx);
-                LOG.info("keep alive of transaction : " + updateTx.toString());
-                break;
-            case APPEND:
-                if (dataFound == true) {
-                    throw new IOException(
-                            "The data to be appended already exists.");
-                }
-                newData.add(updateTx);
-                LOG.info("Added transaction : " + updateTx.toString());
-                break;
+        case REMOVE:
+            if (dataFound == false) {
+                throw new IOException(
+                    "The transaction to be removed not found in the data.");
+            }
+            LOG.info("Removed trasaction : " + updateTx.toString());
+            break;
+        case KEEP_ALIVE:
+            if (dataFound == false) {
+                throw new IOException(
+                    "The transaction to be kept alove not found in the data. It might have been expired.");
+            }
+            newData.add(updateTx);
+            LOG.info("keep alive of transaction : " + updateTx.toString());
+            break;
+        case APPEND:
+            if (dataFound == true) {
+                throw new IOException(
+                    "The data to be appended already exists.");
+            }
+            newData.add(updateTx);
+            LOG.info("Added transaction : " + updateTx.toString());
+            break;
         }
 
         // For serialization purposes.
         List<StoreFamilyRevision> newTxnList = new ArrayList<StoreFamilyRevision>();
         for (FamilyRevision wtxn : newData) {
             StoreFamilyRevision newTxn = new StoreFamilyRevision(wtxn.getRevision(),
-                    wtxn.getExpireTimestamp());
+                wtxn.getExpireTimestamp());
             newTxnList.add(newTxn);
         }
         StoreFamilyRevisionList wtxnList = new StoreFamilyRevisionList(newTxnList);
@@ -377,10 +379,10 @@
             stat = zkSession.setData(path, newByteData, -1);
         } catch (KeeperException e) {
             throw new IOException(
-                    "Exception while updating trasactional data. ", e);
+                "Exception while updating trasactional data. ", e);
         } catch (InterruptedException e) {
             throw new IOException(
-                    "Exception while updating trasactional data. ", e);
+                "Exception while updating trasactional data. ", e);
         }
 
         if (stat != null) {
@@ -395,7 +397,7 @@
      * @param path The path to the transaction data.
      * @throws IOException Signals that an I/O exception has occurred.
      */
-    void refreshTransactions(String path) throws IOException{
+    void refreshTransactions(String path) throws IOException {
         List<FamilyRevision> currentData = getTransactionList(path);
         List<FamilyRevision> newData = new ArrayList<FamilyRevision>();
 
@@ -405,11 +407,11 @@
             }
         }
 
-        if(newData.equals(currentData) == false){
+        if (newData.equals(currentData) == false) {
             List<StoreFamilyRevision> newTxnList = new ArrayList<StoreFamilyRevision>();
             for (FamilyRevision wtxn : newData) {
                 StoreFamilyRevision newTxn = new StoreFamilyRevision(wtxn.getRevision(),
-                        wtxn.getExpireTimestamp());
+                    wtxn.getExpireTimestamp());
                 newTxnList.add(newTxn);
             }
             StoreFamilyRevisionList wtxnList = new StoreFamilyRevisionList(newTxnList);
@@ -419,10 +421,10 @@
                 zkSession.setData(path, newByteData, -1);
             } catch (KeeperException e) {
                 throw new IOException(
-                        "Exception while updating trasactional data. ", e);
+                    "Exception while updating trasactional data. ", e);
             } catch (InterruptedException e) {
                 throw new IOException(
-                        "Exception while updating trasactional data. ", e);
+                    "Exception while updating trasactional data. ", e);
             }
 
         }
@@ -437,7 +439,7 @@
      */
     void deleteZNodes(String tableName) throws IOException {
         String transactionDataTablePath = PathUtil.getTxnDataPath(baseDir,
-                tableName);
+            tableName);
         deleteRecursively(transactionDataTablePath);
     }
 
@@ -452,10 +454,10 @@
             getSession().delete(path, -1);
         } catch (KeeperException e) {
             throw new IOException(
-                    "Exception while deleting path " + path + ".", e);
+                "Exception while deleting path " + path + ".", e);
         } catch (InterruptedException e) {
             throw new IOException(
-                    "Exception while deleting path " + path + ".", e);
+                "Exception while deleting path " + path + ".", e);
         }
     }
 
@@ -471,7 +473,7 @@
             return new byte[0];
         try {
             TSerializer serializer = new TSerializer(
-                    new TBinaryProtocol.Factory());
+                new TBinaryProtocol.Factory());
             byte[] bytes = serializer.serialize(obj);
             return bytes;
         } catch (Exception e) {
@@ -492,7 +494,7 @@
             return;
         try {
             TDeserializer deserializer = new TDeserializer(
-                    new TBinaryProtocol.Factory());
+                new TBinaryProtocol.Factory());
             deserializer.deserialize(obj, data);
         } catch (Exception e) {
             throw new IOException("Deserialization error: " + e.getMessage(), e);
@@ -502,12 +504,12 @@
     private class ZKWatcher implements Watcher {
         public void process(WatchedEvent event) {
             switch (event.getState()) {
-                case Expired:
-                    LOG.info("The client session has expired. Try opening a new "
-                            + "session and connecting again.");
-                    zkSession = null;
-                    break;
-                default:
+            case Expired:
+                LOG.info("The client session has expired. Try opening a new "
+                    + "session and connecting again.");
+                zkSession = null;
+                break;
+            default:
 
             }
         }
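One convention visible in the ZKUtil hunks above is the switch layout adopted by this reformatting: case labels sit at the same indentation as the switch keyword, with case bodies one level deeper. Below is a minimal standalone sketch of that convention; SwitchStyleDemo, describe and the message strings are made-up names used only for illustration, with an enum shaped like the UpdateMode enum above.

// Illustrative sketch of the switch/case indentation used after this patch.
public class SwitchStyleDemo {

    enum UpdateMode {
        APPEND, REMOVE, KEEP_ALIVE
    }

    static String describe(UpdateMode mode) {
        switch (mode) {
        case APPEND:
            return "append a new transaction";
        case REMOVE:
            return "remove an existing transaction";
        case KEEP_ALIVE:
            return "extend an existing transaction";
        default:
            return "unknown mode";
        }
    }

    public static void main(String[] args) {
        // Prints the description for each mode to show the method in use.
        for (UpdateMode mode : UpdateMode.values()) {
            System.out.println(mode + ": " + describe(mode));
        }
    }
}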
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/lock/WriteLock.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/lock/WriteLock.java
index d356327..ebcce3b 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/lock/WriteLock.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/lock/WriteLock.java
@@ -21,7 +21,9 @@
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
+
 import static org.apache.zookeeper.CreateMode.EPHEMERAL_SEQUENTIAL;
+
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.ACL;
 import org.apache.zookeeper.data.Stat;
@@ -78,7 +80,7 @@
      * @param callback the call back instance
      */
     public WriteLock(ZooKeeper zookeeper, String dir, List<ACL> acl,
-            LockListener callback) {
+                     LockListener callback) {
         this(zookeeper, dir, acl);
         this.callback = callback;
     }
@@ -126,15 +128,14 @@
             } catch (InterruptedException e) {
                 LOG.warn("Caught: " + e, e);
                 //set that we have been interrupted.
-               Thread.currentThread().interrupt();
+                Thread.currentThread().interrupt();
             } catch (KeeperException.NoNodeException e) {
                 // do nothing
             } catch (KeeperException e) {
                 LOG.warn("Caught: " + e, e);
                 throw (RuntimeException) new RuntimeException(e.getMessage()).
                     initCause(e);
-            }
-            finally {
+            } finally {
                 if (callback != null) {
                     callback.lockReleased();
                 }
@@ -152,7 +153,7 @@
         public void process(WatchedEvent event) {
             // lets either become the leader or watch the new/updated node
             LOG.debug("Watcher fired on path: " + event.getPath() + " state: " +
-                    event.getState() + " type " + event.getType());
+                event.getState() + " type " + event.getType());
             try {
                 lock();
             } catch (Exception e) {
@@ -165,7 +166,7 @@
      * a zoookeeper operation that is mainly responsible
      * for all the magic required for locking.
      */
-    private  class LockZooKeeperOperation implements ZooKeeperOperation {
+    private class LockZooKeeperOperation implements ZooKeeperOperation {
 
         /** find if we have been created earler if not create our node
          *
@@ -189,7 +190,7 @@
             }
             if (id == null) {
                 id = zookeeper.create(dir + "/" + prefix, data,
-                        getAcl(), EPHEMERAL_SEQUENTIAL);
+                    getAcl(), EPHEMERAL_SEQUENTIAL);
 
                 if (LOG.isDebugEnabled()) {
                     LOG.debug("Created id: " + id);
@@ -217,7 +218,7 @@
                     List<String> names = zookeeper.getChildren(dir, false);
                     if (names.isEmpty()) {
                         LOG.warn("No children in: " + dir + " when we've just " +
-                        "created one! Lets recreate it...");
+                            "created one! Lets recreate it...");
                         // lets force the recreation of the id
                         id = null;
                     } else {
@@ -240,7 +241,7 @@
                                 return Boolean.FALSE;
                             } else {
                                 LOG.warn("Could not find the" +
-                                		" stats for less than me: " + lastChildName.getName());
+                                    " stats for less than me: " + lastChildName.getName());
                             }
                         } else {
                             if (isOwner()) {
@@ -256,7 +257,9 @@
             while (id == null);
             return Boolean.FALSE;
         }
-    };
+    }
+
+    ;
 
     /**
      * Attempts to acquire the exclusive write lock returning whether or not it was
@@ -293,7 +296,7 @@
      * @return the id for this lock
      */
     public String getId() {
-       return this.id;
+        return this.id;
     }
 }
 
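The WriteLock and ZKUtil hunks also show the line-wrapping convention applied throughout: wrapped parameters of a method declaration are aligned under the first parameter, while wrapped arguments of a call are indented four spaces past the start of the statement. A small self-contained illustration follows; WrapStyleDemo, joinPath and the path strings are made up for the example.

// Illustrative sketch of the wrapping conventions applied by this patch.
public class WrapStyleDemo {

    // Declaration: the second parameter is aligned under the first one.
    static String joinPath(String parent,
                           String child) {
        return parent + "/" + child;
    }

    public static void main(String[] args) {
        // Call: the wrapped argument is indented four spaces past the statement.
        String path = joinPath("/hcatalog/transaction-data",
            "my_table");
        System.out.println(path);
    }
}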
diff --git a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/lock/ZNodeName.java b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/lock/ZNodeName.java
index c6a49de..522e272 100644
--- a/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/lock/ZNodeName.java
+++ b/storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/snapshot/lock/ZNodeName.java
@@ -49,7 +49,7 @@
             } catch (NumberFormatException e) {
                 LOG.info("Number format exception for " + idx, e);
             } catch (ArrayIndexOutOfBoundsException e) {
-               LOG.info("Array out of bounds for " + idx, e);
+                LOG.info("Array out of bounds for " + idx, e);
             }
         }
     }
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/ManyMiniCluster.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/ManyMiniCluster.java
index 30bc6de..28d6ec7 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/ManyMiniCluster.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/ManyMiniCluster.java
@@ -95,30 +95,30 @@
         miniZookeeperClusterEnabled = b.miniZookeeperClusterEnabled;
     }
 
-    protected synchronized  void start() {
+    protected synchronized void start() {
         try {
             if (!started) {
                 FileUtil.fullyDelete(workDir);
-                if(miniMRClusterEnabled) {
+                if (miniMRClusterEnabled) {
                     setupMRCluster();
                 }
-                if(miniZookeeperClusterEnabled || miniHBaseClusterEnabled) {
+                if (miniZookeeperClusterEnabled || miniHBaseClusterEnabled) {
                     miniZookeeperClusterEnabled = true;
                     setupZookeeper();
                 }
-                if(miniHBaseClusterEnabled) {
+                if (miniHBaseClusterEnabled) {
                     setupHBaseCluster();
                 }
-                if(miniHiveMetastoreEnabled) {
+                if (miniHiveMetastoreEnabled) {
                     setUpMetastore();
                 }
             }
-        } catch(Exception e) {
-            throw new IllegalStateException("Failed to setup cluster",e);
+        } catch (Exception e) {
+            throw new IllegalStateException("Failed to setup cluster", e);
         }
     }
 
-    protected synchronized  void stop() {
+    protected synchronized void stop() {
         if (hbaseCluster != null) {
             HConnectionManager.deleteAllConnections(true);
             try {
@@ -136,19 +136,19 @@
             }
             zookeeperCluster = null;
         }
-        if(mrCluster != null) {
+        if (mrCluster != null) {
             try {
                 mrCluster.shutdown();
-            } catch(Exception e) {
+            } catch (Exception e) {
                 e.printStackTrace();
             }
             mrCluster = null;
         }
-        if(dfsCluster != null) {
+        if (dfsCluster != null) {
             try {
                 dfsCluster.getFileSystem().close();
                 dfsCluster.shutdown();
-            } catch(Exception e) {
+            } catch (Exception e) {
                 e.printStackTrace();
             }
             dfsCluster = null;
@@ -189,7 +189,7 @@
         try {
             return FileSystem.get(jobConf);
         } catch (IOException e) {
-            throw new IllegalStateException("Failed to get FileSystem",e);
+            throw new IllegalStateException("Failed to get FileSystem", e);
         }
     }
 
@@ -205,38 +205,38 @@
             final int jobTrackerPort = findFreePort();
             final int taskTrackerPort = findFreePort();
 
-            if(jobConf == null)
+            if (jobConf == null)
                 jobConf = new JobConf();
 
             jobConf.setInt("mapred.submit.replication", 1);
             //conf.set("hadoop.job.history.location",new File(workDir).getAbsolutePath()+"/history");
-            System.setProperty("hadoop.log.dir",new File(workDir,"/logs").getAbsolutePath());
+            System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
 
             mrCluster = new MiniMRCluster(jobTrackerPort,
-                                          taskTrackerPort,
-                                          numTaskTrackers,
-                                          getFileSystem().getUri().toString(),
-                                          numTaskTrackers,
-                                          null,
-                                          null,
-                                          null,
-                                          jobConf);
+                taskTrackerPort,
+                numTaskTrackers,
+                getFileSystem().getUri().toString(),
+                numTaskTrackers,
+                null,
+                null,
+                null,
+                jobConf);
 
             jobConf = mrCluster.createJobConf();
         } catch (IOException e) {
-            throw new IllegalStateException("Failed to Setup MR Cluster",e);
+            throw new IllegalStateException("Failed to Setup MR Cluster", e);
         }
     }
 
     private void setupZookeeper() {
         try {
-            zookeeperDir = new File(workDir,"zk").getAbsolutePath();
+            zookeeperDir = new File(workDir, "zk").getAbsolutePath();
             zookeeperPort = findFreePort();
             zookeeperCluster = new MiniZooKeeperCluster();
             zookeeperCluster.setDefaultClientPort(zookeeperPort);
             zookeeperCluster.startup(new File(zookeeperDir));
-        } catch(Exception e) {
-            throw new IllegalStateException("Failed to Setup Zookeeper Cluster",e);
+        } catch (Exception e) {
+            throw new IllegalStateException("Failed to Setup Zookeeper Cluster", e);
         }
     }
 
@@ -244,10 +244,10 @@
         final int numRegionServers = 1;
 
         try {
-            hbaseDir = new File(workDir,"hbase").getAbsolutePath();
+            hbaseDir = new File(workDir, "hbase").getAbsolutePath();
             hbaseRoot = "file://" + hbaseDir;
 
-            if(hbaseConf == null)
+            if (hbaseConf == null)
                 hbaseConf = HBaseConfiguration.create();
 
             hbaseConf.set("hbase.rootdir", hbaseRoot);
@@ -264,12 +264,12 @@
             //opening the META table ensures that cluster is running
             new HTable(hbaseConf, HConstants.META_TABLE_NAME);
         } catch (Exception e) {
-            throw new IllegalStateException("Failed to setup HBase Cluster",e);
+            throw new IllegalStateException("Failed to setup HBase Cluster", e);
         }
     }
 
     private void setUpMetastore() throws Exception {
-        if(hiveConf == null)
+        if (hiveConf == null)
             hiveConf = new HiveConf(this.getClass());
 
         //The default org.apache.hadoop.hive.ql.hooks.PreExecutePrinter hook
@@ -278,13 +278,13 @@
         hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
         hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
         hiveConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
-                     "jdbc:derby:"+new File(workDir+"/metastore_db")+";create=true");
+            "jdbc:derby:" + new File(workDir + "/metastore_db") + ";create=true");
         hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.toString(),
-                     new File(workDir,"warehouse").toString());
+            new File(workDir, "warehouse").toString());
         //set where derby logs
-        File derbyLogFile = new File(workDir+"/derby.log");
+        File derbyLogFile = new File(workDir + "/derby.log");
         derbyLogFile.createNewFile();
-        System.setProperty("derby.stream.error.file",derbyLogFile.getPath());
+        System.setProperty("derby.stream.error.file", derbyLogFile.getPath());
 
 
 //    Driver driver = new Driver(hiveConf);
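ManyMiniCluster, reformatted above, is driven through a builder, and the SkeletonHBaseTest changes that follow show its lifecycle: create a builder for a work directory, optionally pass an HBase configuration, then build and start, and later stop. The sketch below restates that lifecycle under stated assumptions: MiniClusterLifecycleDemo and the scratch directory path are invented for the example, and it sits in the same package because start() and stop() are protected.

package org.apache.hcatalog.hbase; // same package as ManyMiniCluster, since start()/stop() are protected

import java.io.File;

import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: mirrors SkeletonHBaseTest.Context.start()/stop() in the next file.
public class MiniClusterLifecycleDemo {
    public static void main(String[] args) {
        // Hypothetical scratch directory for the mini clusters.
        File workDir = new File("/tmp/hcat_minicluster_demo");

        ManyMiniCluster.Builder builder = ManyMiniCluster.create(workDir);
        // Optional: supply an explicit HBase configuration, as the test context does.
        builder.hbaseConf(HBaseConfiguration.create());

        ManyMiniCluster cluster = builder.build();
        cluster.start();
        try {
            // A test would obtain configurations and file system handles from the cluster here.
        } finally {
            // Always tear the clusters down, mirroring Context.stop() below.
            cluster.stop();
        }
    }
}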
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/SkeletonHBaseTest.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/SkeletonHBaseTest.java
index fbadba3..ea082c2 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/SkeletonHBaseTest.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/SkeletonHBaseTest.java
@@ -46,7 +46,7 @@
 
     protected final static String DEFAULT_CONTEXT_HANDLE = "default";
 
-    protected static Map<String,Context> contextMap = new HashMap<String,Context>();
+    protected static Map<String, Context> contextMap = new HashMap<String, Context>();
     protected static Set<String> tableNames = new HashSet<String>();
 
     /**
@@ -59,7 +59,7 @@
         try {
             HBaseAdmin admin = new HBaseAdmin(getHbaseConf());
             HTableDescriptor tableDesc = new HTableDescriptor(tableName);
-            for(String family: families) {
+            for (String family : families) {
                 HColumnDescriptor columnDescriptor = new HColumnDescriptor(family);
                 tableDesc.addFamily(columnDescriptor);
             }
@@ -72,13 +72,13 @@
     }
 
     protected String newTableName(String prefix) {
-        String name =null;
+        String name = null;
         int tries = 100;
         do {
-            name = prefix+"_"+Math.abs(new Random().nextLong());
-        } while(tableNames.contains(name) && --tries > 0);
-        if(tableNames.contains(name))
-            throw new IllegalStateException("Couldn't find a unique table name, tableNames size: "+tableNames.size());
+            name = prefix + "_" + Math.abs(new Random().nextLong());
+        } while (tableNames.contains(name) && --tries > 0);
+        if (tableNames.contains(name))
+            throw new IllegalStateException("Couldn't find a unique table name, tableNames size: " + tableNames.size());
         tableNames.add(name);
         return name;
     }
@@ -89,8 +89,8 @@
      */
     @BeforeClass
     public static void setup() {
-        if(!contextMap.containsKey(getContextHandle()))
-            contextMap.put(getContextHandle(),new Context(getContextHandle()));
+        if (!contextMap.containsKey(getContextHandle()))
+            contextMap.put(getContextHandle(), new Context(getContextHandle()));
 
         contextMap.get(getContextHandle()).start();
     }
@@ -172,18 +172,18 @@
 
         public Context(String handle) {
             try {
-                testDir = new File(TEST_DIR+"/test_"+handle+"_"+Math.abs(new Random().nextLong())+"/").getCanonicalPath();
+                testDir = new File(TEST_DIR + "/test_" + handle + "_" + Math.abs(new Random().nextLong()) + "/").getCanonicalPath();
             } catch (IOException e) {
-                throw new IllegalStateException("Failed to generate testDir",e);
+                throw new IllegalStateException("Failed to generate testDir", e);
             }
-            System.out.println("Cluster work directory: "+testDir);
+            System.out.println("Cluster work directory: " + testDir);
         }
 
         public void start() {
-            if(usageCount++ == 0) {
-            	ManyMiniCluster.Builder b = ManyMiniCluster.create(new File(testDir));
+            if (usageCount++ == 0) {
+                ManyMiniCluster.Builder b = ManyMiniCluster.create(new File(testDir));
                 if (testConf != null) {
-                   b.hbaseConf(HBaseConfiguration.create(testConf));
+                    b.hbaseConf(HBaseConfiguration.create(testConf));
                 }
                 cluster = b.build();
                 cluster.start();
@@ -195,16 +195,16 @@
         }
 
         public void stop() {
-            if( --usageCount == 0)  {
+            if (--usageCount == 0) {
                 try {
                     cluster.stop();
                     cluster = null;
                 } finally {
-                    System.out.println("Trying to cleanup: "+testDir);
+                    System.out.println("Trying to cleanup: " + testDir);
                     try {
                         FileUtil.fullyDelete(new File(testDir));
                     } catch (IOException e) {
-                        throw new IllegalStateException("Failed to cleanup test dir",e);
+                        throw new IllegalStateException("Failed to cleanup test dir", e);
                     }
                 }
             }
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseBulkOutputFormat.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseBulkOutputFormat.java
index a1faa0e..3875185 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseBulkOutputFormat.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseBulkOutputFormat.java
@@ -92,9 +92,9 @@
     public TestHBaseBulkOutputFormat() {
         allConf = getHiveConf();
         allConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-                HCatSemanticAnalyzer.class.getName());
+            HCatSemanticAnalyzer.class.getName());
         allConf.set(HiveConf.ConfVars.HADOOPFS.varname, getFileSystem().getUri().toString());
-        allConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, new Path(getTestDir(),"warehouse").toString());
+        allConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, new Path(getTestDir(), "warehouse").toString());
 
         //Add hbase properties
         for (Map.Entry<String, String> el : getHbaseConf())
@@ -103,8 +103,8 @@
             allConf.set(el.getKey(), el.getValue());
 
         HBaseConfiguration.merge(
-                allConf,
-                RevisionManagerConfiguration.create());
+            allConf,
+            RevisionManagerConfiguration.create());
         SessionState.start(new CliSessionState(allConf));
         hcatDriver = new HCatDriver();
     }
@@ -121,17 +121,17 @@
 
         @Override
         public void map(LongWritable key, Text value,
-                OutputCollector<ImmutableBytesWritable, Put> output,
-                Reporter reporter) throws IOException {
+                        OutputCollector<ImmutableBytesWritable, Put> output,
+                        Reporter reporter) throws IOException {
             String vals[] = value.toString().split(",");
             Put put = new Put(Bytes.toBytes(vals[0]));
-            for(int i=1;i<vals.length;i++) {
+            for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
                 put.add(Bytes.toBytes("my_family"),
-                        Bytes.toBytes(pair[0]),
-                        Bytes.toBytes(pair[1]));
+                    Bytes.toBytes(pair[0]),
+                    Bytes.toBytes(pair[1]));
             }
-            output.collect(new ImmutableBytesWritable(Bytes.toBytes(vals[0])),put);
+            output.collect(new ImmutableBytesWritable(Bytes.toBytes(vals[0])), put);
         }
 
     }
@@ -142,37 +142,37 @@
         public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
             String vals[] = value.toString().split(",");
             Put put = new Put(Bytes.toBytes(vals[0]));
-            for(int i=1;i<vals.length;i++) {
+            for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
                 put.add(Bytes.toBytes("my_family"),
-                        Bytes.toBytes(pair[0]),
-                        Bytes.toBytes(pair[1]));
+                    Bytes.toBytes(pair[0]),
+                    Bytes.toBytes(pair[1]));
             }
-            context.write(new ImmutableBytesWritable(Bytes.toBytes(vals[0])),put);
+            context.write(new ImmutableBytesWritable(Bytes.toBytes(vals[0])), put);
         }
     }
 
     public static class MapHCatWrite extends Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
         @Override
         public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
-            OutputJobInfo jobInfo = (OutputJobInfo)HCatUtil.deserialize(context.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
+            OutputJobInfo jobInfo = (OutputJobInfo) HCatUtil.deserialize(context.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
             HCatRecord record = new DefaultHCatRecord(3);
             HCatSchema schema = jobInfo.getOutputSchema();
             String vals[] = value.toString().split(",");
-            record.setInteger("key",schema,Integer.parseInt(vals[0]));
-            for(int i=1;i<vals.length;i++) {
+            record.setInteger("key", schema, Integer.parseInt(vals[0]));
+            for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
-                record.set(pair[0],schema,pair[1]);
+                record.set(pair[0], schema, pair[1]);
             }
-            context.write(null,record);
+            context.write(null, record);
         }
     }
 
     @Test
     public void hbaseBulkOutputFormatTest() throws IOException, ClassNotFoundException, InterruptedException {
         String testName = "hbaseBulkOutputFormatTest";
-        Path methodTestDir = new Path(getTestDir(),testName);
-        LOG.info("starting: "+testName);
+        Path methodTestDir = new Path(getTestDir(), testName);
+        LOG.info("starting: " + testName);
 
         String tableName = newTableName(testName).toLowerCase();
         String familyName = "my_family";
@@ -186,21 +186,20 @@
         createTable(tableName, new String[]{familyName});
 
         String data[] = {"1,english:one,spanish:uno",
-                               "2,english:two,spanish:dos",
-                               "3,english:three,spanish:tres"};
-
+            "2,english:two,spanish:dos",
+            "3,english:three,spanish:tres"};
 
 
         // input/output settings
-        Path inputPath = new Path(methodTestDir,"mr_input");
-        FSDataOutputStream os = getFileSystem().create(new Path(inputPath,"inputFile.txt"));
-        for(String line: data)
+        Path inputPath = new Path(methodTestDir, "mr_input");
+        FSDataOutputStream os = getFileSystem().create(new Path(inputPath, "inputFile.txt"));
+        for (String line : data)
             os.write(Bytes.toBytes(line + "\n"));
         os.close();
-        Path interPath = new Path(methodTestDir,"inter");
+        Path interPath = new Path(methodTestDir, "inter");
         //create job
         JobConf job = new JobConf(conf);
-        job.setWorkingDirectory(new Path(methodTestDir,"mr_work"));
+        job.setWorkingDirectory(new Path(methodTestDir, "mr_work"));
         job.setJarByClass(this.getClass());
         job.setMapperClass(MapWriteOldMapper.class);
 
@@ -217,9 +216,9 @@
             OutputJobInfo outputJobInfo = OutputJobInfo.create("default", tableName, null);
             Transaction txn = rm.beginWriteTransaction(tableName, Arrays.asList(familyName));
             outputJobInfo.getProperties().setProperty(HBaseConstants.PROPERTY_WRITE_TXN_KEY,
-                                                      HCatUtil.serialize(txn));
+                HCatUtil.serialize(txn));
             job.set(HCatConstants.HCAT_KEY_OUTPUT_INFO,
-                                       HCatUtil.serialize(outputJobInfo));
+                HCatUtil.serialize(outputJobInfo));
         } finally {
             rm.close();
         }
@@ -241,18 +240,18 @@
         Scan scan = new Scan();
         scan.addFamily(familyNameBytes);
         ResultScanner scanner = table.getScanner(scan);
-        int index=0;
-        for(Result result: scanner) {
+        int index = 0;
+        for (Result result : scanner) {
             String vals[] = data[index].toString().split(",");
-            for(int i=1;i<vals.length;i++) {
+            for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
-                assertTrue(result.containsColumn(familyNameBytes,Bytes.toBytes(pair[0])));
-                assertEquals(pair[1],Bytes.toString(result.getValue(familyNameBytes,Bytes.toBytes(pair[0]))));
+                assertTrue(result.containsColumn(familyNameBytes, Bytes.toBytes(pair[0])));
+                assertEquals(pair[1], Bytes.toString(result.getValue(familyNameBytes, Bytes.toBytes(pair[0]))));
             }
             index++;
         }
         //test if load count is the same
-        assertEquals(data.length,index);
+        assertEquals(data.length, index);
         //test if scratch directory was erased
         assertFalse(FileSystem.get(job).exists(interPath));
     }
@@ -260,8 +259,8 @@
     @Test
     public void importSequenceFileTest() throws IOException, ClassNotFoundException, InterruptedException {
         String testName = "importSequenceFileTest";
-        Path methodTestDir = new Path(getTestDir(),testName);
-        LOG.info("starting: "+testName);
+        Path methodTestDir = new Path(getTestDir(), testName);
+        LOG.info("starting: " + testName);
 
         String tableName = newTableName(testName).toLowerCase();
         String familyName = "my_family";
@@ -271,28 +270,27 @@
         Configuration conf = new Configuration(allConf);
 
         //create table
-        createTable(tableName,new String[]{familyName});
+        createTable(tableName, new String[]{familyName});
 
         String data[] = {"1,english:one,spanish:uno",
-                               "2,english:two,spanish:dos",
-                               "3,english:three,spanish:tres"};
-
+            "2,english:two,spanish:dos",
+            "3,english:three,spanish:tres"};
 
 
         // input/output settings
-        Path inputPath = new Path(methodTestDir,"mr_input");
+        Path inputPath = new Path(methodTestDir, "mr_input");
         getFileSystem().mkdirs(inputPath);
-        FSDataOutputStream os = getFileSystem().create(new Path(inputPath,"inputFile.txt"));
-        for(String line: data)
+        FSDataOutputStream os = getFileSystem().create(new Path(inputPath, "inputFile.txt"));
+        for (String line : data)
             os.write(Bytes.toBytes(line + "\n"));
         os.close();
-        Path interPath = new Path(methodTestDir,"inter");
-        Path scratchPath = new Path(methodTestDir,"scratch");
+        Path interPath = new Path(methodTestDir, "inter");
+        Path scratchPath = new Path(methodTestDir, "scratch");
 
 
         //create job
         Job job = new Job(conf, testName);
-        job.setWorkingDirectory(new Path(methodTestDir,"mr_work"));
+        job.setWorkingDirectory(new Path(methodTestDir, "mr_work"));
         job.setJarByClass(this.getClass());
         job.setMapperClass(MapWrite.class);
 
@@ -300,7 +298,7 @@
         TextInputFormat.setInputPaths(job, inputPath);
 
         job.setOutputFormatClass(SequenceFileOutputFormat.class);
-        SequenceFileOutputFormat.setOutputPath(job,interPath);
+        SequenceFileOutputFormat.setOutputPath(job, interPath);
 
         job.setMapOutputKeyClass(ImmutableBytesWritable.class);
         job.setMapOutputValueClass(Put.class);
@@ -311,7 +309,7 @@
         job.setNumReduceTasks(0);
         assertTrue(job.waitForCompletion(true));
 
-        job = new Job(new Configuration(allConf),testName+"_importer");
+        job = new Job(new Configuration(allConf), testName + "_importer");
         assertTrue(ImportSequenceFile.runJob(job, tableName, interPath, scratchPath));
 
         //verify
@@ -319,18 +317,18 @@
         Scan scan = new Scan();
         scan.addFamily(familyNameBytes);
         ResultScanner scanner = table.getScanner(scan);
-        int index=0;
-        for(Result result: scanner) {
+        int index = 0;
+        for (Result result : scanner) {
             String vals[] = data[index].toString().split(",");
-            for(int i=1;i<vals.length;i++) {
+            for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
-                assertTrue(result.containsColumn(familyNameBytes,Bytes.toBytes(pair[0])));
-                assertEquals(pair[1],Bytes.toString(result.getValue(familyNameBytes,Bytes.toBytes(pair[0]))));
+                assertTrue(result.containsColumn(familyNameBytes, Bytes.toBytes(pair[0])));
+                assertEquals(pair[1], Bytes.toString(result.getValue(familyNameBytes, Bytes.toBytes(pair[0]))));
             }
             index++;
         }
         //test if load count is the same
-        assertEquals(data.length,index);
+        assertEquals(data.length, index);
         //test if scratch directory was erased
         assertFalse(FileSystem.get(job.getConfiguration()).exists(scratchPath));
     }
@@ -338,11 +336,11 @@
     @Test
     public void bulkModeHCatOutputFormatTest() throws Exception {
         String testName = "bulkModeHCatOutputFormatTest";
-        Path methodTestDir = new Path(getTestDir(),testName);
-        LOG.info("starting: "+testName);
+        Path methodTestDir = new Path(getTestDir(), testName);
+        LOG.info("starting: " + testName);
 
         String databaseName = testName.toLowerCase();
-        String dbDir = new Path(methodTestDir,"DB_"+testName).toString();
+        String dbDir = new Path(methodTestDir, "DB_" + testName).toString();
         String tableName = newTableName(testName).toLowerCase();
         String familyName = "my_family";
         byte[] familyNameBytes = Bytes.toBytes(familyName);
@@ -355,31 +353,31 @@
 
         String dbquery = "CREATE DATABASE IF NOT EXISTS " + databaseName + " LOCATION '" + dbDir + "'";
         String tableQuery = "CREATE TABLE " + databaseName + "." + tableName +
-                              "(key int, english string, spanish string) STORED BY " +
-                              "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'" +
-                              "TBLPROPERTIES ('"+HBaseConstants.PROPERTY_BULK_OUTPUT_MODE_KEY+"'='true',"+
-                              "'hbase.columns.mapping'=':key,"+familyName+":english,"+familyName+":spanish')" ;
+            "(key int, english string, spanish string) STORED BY " +
+            "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'" +
+            "TBLPROPERTIES ('" + HBaseConstants.PROPERTY_BULK_OUTPUT_MODE_KEY + "'='true'," +
+            "'hbase.columns.mapping'=':key," + familyName + ":english," + familyName + ":spanish')";
 
         assertEquals(0, hcatDriver.run(dbquery).getResponseCode());
         assertEquals(0, hcatDriver.run(tableQuery).getResponseCode());
 
         String data[] = {"1,english:ONE,spanish:UNO",
-                               "2,english:TWO,spanish:DOS",
-                               "3,english:THREE,spanish:TRES"};
+            "2,english:TWO,spanish:DOS",
+            "3,english:THREE,spanish:TRES"};
 
         // input/output settings
-        Path inputPath = new Path(methodTestDir,"mr_input");
+        Path inputPath = new Path(methodTestDir, "mr_input");
         getFileSystem().mkdirs(inputPath);
         //create multiple files so we can test with multiple mappers
-        for(int i=0;i<data.length;i++) {
-            FSDataOutputStream os = getFileSystem().create(new Path(inputPath,"inputFile"+i+".txt"));
+        for (int i = 0; i < data.length; i++) {
+            FSDataOutputStream os = getFileSystem().create(new Path(inputPath, "inputFile" + i + ".txt"));
             os.write(Bytes.toBytes(data[i] + "\n"));
             os.close();
         }
 
         //create job
-        Job job = new Job(conf,testName);
-        job.setWorkingDirectory(new Path(methodTestDir,"mr_work"));
+        Job job = new Job(conf, testName);
+        job.setWorkingDirectory(new Path(methodTestDir, "mr_work"));
         job.setJarByClass(this.getClass());
         job.setMapperClass(MapHCatWrite.class);
 
@@ -388,8 +386,8 @@
 
 
         job.setOutputFormatClass(HCatOutputFormat.class);
-        OutputJobInfo outputJobInfo = OutputJobInfo.create(databaseName,tableName,null);
-        HCatOutputFormat.setOutput(job,outputJobInfo);
+        OutputJobInfo outputJobInfo = OutputJobInfo.create(databaseName, tableName, null);
+        HCatOutputFormat.setOutput(job, outputJobInfo);
 
         job.setMapOutputKeyClass(BytesWritable.class);
         job.setMapOutputValueClass(HCatRecord.class);
@@ -402,41 +400,41 @@
         assertTrue(job.waitForCompletion(true));
         RevisionManager rm = HBaseRevisionManagerUtil.getOpenedRevisionManager(conf);
         try {
-            TableSnapshot snapshot = rm.createSnapshot(databaseName+"."+tableName);
-            for(String el: snapshot.getColumnFamilies()) {
-                assertEquals(1,snapshot.getRevision(el));
+            TableSnapshot snapshot = rm.createSnapshot(databaseName + "." + tableName);
+            for (String el : snapshot.getColumnFamilies()) {
+                assertEquals(1, snapshot.getRevision(el));
             }
         } finally {
             rm.close();
         }
 
         //verify
-        HTable table = new HTable(conf, databaseName+"."+tableName);
+        HTable table = new HTable(conf, databaseName + "." + tableName);
         Scan scan = new Scan();
         scan.addFamily(familyNameBytes);
         ResultScanner scanner = table.getScanner(scan);
-        int index=0;
-        for(Result result: scanner) {
+        int index = 0;
+        for (Result result : scanner) {
             String vals[] = data[index].toString().split(",");
-            for(int i=1;i<vals.length;i++) {
+            for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
-                assertTrue(result.containsColumn(familyNameBytes,Bytes.toBytes(pair[0])));
-                assertEquals(pair[1],Bytes.toString(result.getValue(familyNameBytes,Bytes.toBytes(pair[0]))));
-                assertEquals(1l,result.getColumn(familyNameBytes,Bytes.toBytes(pair[0])).get(0).getTimestamp());
+                assertTrue(result.containsColumn(familyNameBytes, Bytes.toBytes(pair[0])));
+                assertEquals(pair[1], Bytes.toString(result.getValue(familyNameBytes, Bytes.toBytes(pair[0]))));
+                assertEquals(1l, result.getColumn(familyNameBytes, Bytes.toBytes(pair[0])).get(0).getTimestamp());
             }
             index++;
         }
         //test if load count is the same
-        assertEquals(data.length,index);
+        assertEquals(data.length, index);
     }
 
     @Test
     public void bulkModeHCatOutputFormatTestWithDefaultDB() throws Exception {
         String testName = "bulkModeHCatOutputFormatTestWithDefaultDB";
-        Path methodTestDir = new Path(getTestDir(),testName);
+        Path methodTestDir = new Path(getTestDir(), testName);
 
         String databaseName = "default";
-        String dbDir = new Path(methodTestDir,"DB_"+testName).toString();
+        String dbDir = new Path(methodTestDir, "DB_" + testName).toString();
         String tableName = newTableName(testName).toLowerCase();
         String familyName = "my_family";
         byte[] familyNameBytes = Bytes.toBytes(familyName);
@@ -449,29 +447,29 @@
 
         String dbquery = "CREATE DATABASE IF NOT EXISTS " + databaseName + " LOCATION '" + dbDir + "'";
         String tableQuery = "CREATE TABLE " + databaseName + "." + tableName +
-                              "(key int, english string, spanish string) STORED BY " +
-                              "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'" +
-                              "TBLPROPERTIES ('"+HBaseConstants.PROPERTY_BULK_OUTPUT_MODE_KEY+"'='true',"+
-                              "'hbase.columns.mapping'=':key,"+familyName+":english,"+familyName+":spanish')" ;
+            "(key int, english string, spanish string) STORED BY " +
+            "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'" +
+            "TBLPROPERTIES ('" + HBaseConstants.PROPERTY_BULK_OUTPUT_MODE_KEY + "'='true'," +
+            "'hbase.columns.mapping'=':key," + familyName + ":english," + familyName + ":spanish')";
 
         assertEquals(0, hcatDriver.run(dbquery).getResponseCode());
         assertEquals(0, hcatDriver.run(tableQuery).getResponseCode());
 
         String data[] = {"1,english:ONE,spanish:UNO",
-                               "2,english:TWO,spanish:DOS",
-                               "3,english:THREE,spanish:TRES"};
+            "2,english:TWO,spanish:DOS",
+            "3,english:THREE,spanish:TRES"};
 
         // input/output settings
-        Path inputPath = new Path(methodTestDir,"mr_input");
+        Path inputPath = new Path(methodTestDir, "mr_input");
         getFileSystem().mkdirs(inputPath);
-        FSDataOutputStream os = getFileSystem().create(new Path(inputPath,"inputFile.txt"));
-        for(String line: data)
+        FSDataOutputStream os = getFileSystem().create(new Path(inputPath, "inputFile.txt"));
+        for (String line : data)
             os.write(Bytes.toBytes(line + "\n"));
         os.close();
 
         //create job
-        Job job = new Job(conf,testName);
-        job.setWorkingDirectory(new Path(methodTestDir,"mr_work"));
+        Job job = new Job(conf, testName);
+        job.setWorkingDirectory(new Path(methodTestDir, "mr_work"));
         job.setJarByClass(this.getClass());
         job.setMapperClass(MapHCatWrite.class);
 
@@ -480,8 +478,8 @@
 
 
         job.setOutputFormatClass(HCatOutputFormat.class);
-        OutputJobInfo outputJobInfo = OutputJobInfo.create(databaseName,tableName,null);
-        HCatOutputFormat.setOutput(job,outputJobInfo);
+        OutputJobInfo outputJobInfo = OutputJobInfo.create(databaseName, tableName, null);
+        HCatOutputFormat.setOutput(job, outputJobInfo);
 
         job.setMapOutputKeyClass(BytesWritable.class);
         job.setMapOutputValueClass(HCatRecord.class);
@@ -498,18 +496,18 @@
         Scan scan = new Scan();
         scan.addFamily(familyNameBytes);
         ResultScanner scanner = table.getScanner(scan);
-        int index=0;
-        for(Result result: scanner) {
+        int index = 0;
+        for (Result result : scanner) {
             String vals[] = data[index].toString().split(",");
-            for(int i=1;i<vals.length;i++) {
+            for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
-                assertTrue(result.containsColumn(familyNameBytes,Bytes.toBytes(pair[0])));
-                assertEquals(pair[1],Bytes.toString(result.getValue(familyNameBytes,Bytes.toBytes(pair[0]))));
+                assertTrue(result.containsColumn(familyNameBytes, Bytes.toBytes(pair[0])));
+                assertEquals(pair[1], Bytes.toString(result.getValue(familyNameBytes, Bytes.toBytes(pair[0]))));
             }
             index++;
         }
         //test if load count is the same
-        assertEquals(data.length,index);
+        assertEquals(data.length, index);
     }
 
     @Test
@@ -526,37 +524,37 @@
         conf.set(HCatConstants.HCAT_KEY_HIVE_CONF, HCatUtil.serialize(allConf.getAllProperties()));
 
         String dbquery = "CREATE DATABASE IF NOT EXISTS " + databaseName + " LOCATION '" + dbDir
-                + "'";
+            + "'";
         String tableQuery = "CREATE TABLE " + databaseName + "." + tableName +
-                "(key int, english string, spanish string) STORED BY " +
-                "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'" +
-                "TBLPROPERTIES ('" + HBaseConstants.PROPERTY_BULK_OUTPUT_MODE_KEY + "'='true'," +
-                "'hbase.columns.mapping'=':key," + familyName + ":english," + familyName
-                + ":spanish')";
+            "(key int, english string, spanish string) STORED BY " +
+            "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'" +
+            "TBLPROPERTIES ('" + HBaseConstants.PROPERTY_BULK_OUTPUT_MODE_KEY + "'='true'," +
+            "'hbase.columns.mapping'=':key," + familyName + ":english," + familyName
+            + ":spanish')";
 
         assertEquals(0, hcatDriver.run(dbquery).getResponseCode());
         assertEquals(0, hcatDriver.run(tableQuery).getResponseCode());
 
         String data[] = {"1,english:ONE,spanish:UNO",
-                "2,english:TWO,spanish:DOS",
-                "3,english:THREE,spanish:TRES"};
+            "2,english:TWO,spanish:DOS",
+            "3,english:THREE,spanish:TRES"};
 
         Path inputPath = new Path(methodTestDir, "mr_input");
         getFileSystem().mkdirs(inputPath);
         // create multiple files so we can test with multiple mappers
         for (int i = 0; i < data.length; i++) {
             FSDataOutputStream os = getFileSystem().create(
-                    new Path(inputPath, "inputFile" + i + ".txt"));
+                new Path(inputPath, "inputFile" + i + ".txt"));
             os.write(Bytes.toBytes(data[i] + "\n"));
             os.close();
         }
 
         Path workingDir = new Path(methodTestDir, "mr_abort");
         OutputJobInfo outputJobInfo = OutputJobInfo.create(databaseName,
-                tableName, null);
+            tableName, null);
         Job job = configureJob(testName,
-                conf, workingDir, MapWriteAbortTransaction.class,
-                outputJobInfo, inputPath);
+            conf, workingDir, MapWriteAbortTransaction.class,
+            outputJobInfo, inputPath);
         assertFalse(job.waitForCompletion(true));
 
         // verify that revision manager has it as aborted transaction
@@ -566,7 +564,7 @@
             for (String family : snapshot.getColumnFamilies()) {
                 assertEquals(1, snapshot.getRevision(family));
                 List<FamilyRevision> abortedWriteTransactions = rm.getAbortedWriteTransactions(
-                        databaseName + "." + tableName, family);
+                    databaseName + "." + tableName, family);
                 assertEquals(1, abortedWriteTransactions.size());
                 assertEquals(1, abortedWriteTransactions.get(0).getRevision());
             }
@@ -585,7 +583,7 @@
 
         // verify that the storage handler input format returns empty results.
         Path outputDir = new Path(getTestDir(),
-                "mapred/testHBaseTableBulkIgnoreAbortedTransactions");
+            "mapred/testHBaseTableBulkIgnoreAbortedTransactions");
         FileSystem fs = getFileSystem();
         if (fs.exists(outputDir)) {
             fs.delete(outputDir, true);
@@ -595,7 +593,7 @@
         job.setMapperClass(MapReadAbortedTransaction.class);
         job.setInputFormatClass(HCatInputFormat.class);
         InputJobInfo inputJobInfo = InputJobInfo.create(databaseName,
-                tableName, null);
+            tableName, null);
         HCatInputFormat.setInput(job, inputJobInfo);
         job.setOutputFormatClass(TextOutputFormat.class);
         TextOutputFormat.setOutputPath(job, outputDir);
@@ -608,8 +606,8 @@
     }
 
     private Job configureJob(String jobName, Configuration conf,
-            Path workingDir, Class<? extends Mapper> mapperClass,
-            OutputJobInfo outputJobInfo, Path inputPath) throws IOException {
+                             Path workingDir, Class<? extends Mapper> mapperClass,
+                             OutputJobInfo outputJobInfo, Path inputPath) throws IOException {
         Job job = new Job(conf, jobName);
         job.setWorkingDirectory(workingDir);
         job.setJarByClass(this.getClass());
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseDirectOutputFormat.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseDirectOutputFormat.java
index 349e1db..81a3099 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseDirectOutputFormat.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseDirectOutputFormat.java
@@ -86,9 +86,9 @@
     public TestHBaseDirectOutputFormat() {
         allConf = getHiveConf();
         allConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-                HCatSemanticAnalyzer.class.getName());
+            HCatSemanticAnalyzer.class.getName());
         allConf.set(HiveConf.ConfVars.HADOOPFS.varname, getFileSystem().getUri().toString());
-        allConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, new Path(getTestDir(),"warehouse").toString());
+        allConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, new Path(getTestDir(), "warehouse").toString());
 
         //Add hbase properties
         for (Map.Entry<String, String> el : getHbaseConf())
@@ -96,8 +96,8 @@
         for (Map.Entry<String, String> el : getJobConf())
             allConf.set(el.getKey(), el.getValue());
         HBaseConfiguration.merge(
-                allConf,
-                RevisionManagerConfiguration.create());
+            allConf,
+            RevisionManagerConfiguration.create());
         SessionState.start(new CliSessionState(allConf));
         hcatDriver = new HCatDriver();
     }
@@ -105,7 +105,7 @@
     @Test
     public void directOutputFormatTest() throws IOException, ClassNotFoundException, InterruptedException {
         String testName = "directOutputFormatTest";
-        Path methodTestDir = new Path(getTestDir(),testName);
+        Path methodTestDir = new Path(getTestDir(), testName);
 
         String tableName = newTableName(testName).toLowerCase();
         String familyName = "my_family";
@@ -116,26 +116,25 @@
         conf.set(HCatConstants.HCAT_KEY_HIVE_CONF, HCatUtil.serialize(allConf.getAllProperties()));
 
         //create table
-        createTable(tableName,new String[]{familyName});
+        createTable(tableName, new String[]{familyName});
 
         String data[] = {"1,english:ONE,spanish:UNO",
-                "2,english:ONE,spanish:DOS",
-                "3,english:ONE,spanish:TRES"};
-
+            "2,english:ONE,spanish:DOS",
+            "3,english:ONE,spanish:TRES"};
 
 
         // input/output settings
-        Path inputPath = new Path(methodTestDir,"mr_input");
+        Path inputPath = new Path(methodTestDir, "mr_input");
         getFileSystem().mkdirs(inputPath);
-        FSDataOutputStream os = getFileSystem().create(new Path(inputPath,"inputFile.txt"));
-        for(String line: data)
+        FSDataOutputStream os = getFileSystem().create(new Path(inputPath, "inputFile.txt"));
+        for (String line : data)
             os.write(Bytes.toBytes(line + "\n"));
         os.close();
 
         //create job
         JobConf job = new JobConf(conf);
         job.setJobName(testName);
-        job.setWorkingDirectory(new Path(methodTestDir,"mr_work"));
+        job.setWorkingDirectory(new Path(methodTestDir, "mr_work"));
         job.setJarByClass(this.getClass());
         job.setMapperClass(MapWrite.class);
 
@@ -152,9 +151,9 @@
             OutputJobInfo outputJobInfo = OutputJobInfo.create("default", tableName, null);
             Transaction txn = rm.beginWriteTransaction(tableName, Arrays.asList(familyName));
             outputJobInfo.getProperties().setProperty(HBaseConstants.PROPERTY_WRITE_TXN_KEY,
-                                                      HCatUtil.serialize(txn));
+                HCatUtil.serialize(txn));
             job.set(HCatConstants.HCAT_KEY_OUTPUT_INFO,
-                                       HCatUtil.serialize(outputJobInfo));
+                HCatUtil.serialize(outputJobInfo));
         } finally {
             rm.close();
         }
@@ -174,26 +173,26 @@
         Scan scan = new Scan();
         scan.addFamily(familyNameBytes);
         ResultScanner scanner = table.getScanner(scan);
-        int index=0;
-        for(Result result: scanner) {
+        int index = 0;
+        for (Result result : scanner) {
             String vals[] = data[index].toString().split(",");
-            for(int i=1;i<vals.length;i++) {
+            for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
-                assertTrue(result.containsColumn(familyNameBytes,Bytes.toBytes(pair[0])));
-                assertEquals(pair[1],Bytes.toString(result.getValue(familyNameBytes,Bytes.toBytes(pair[0]))));
+                assertTrue(result.containsColumn(familyNameBytes, Bytes.toBytes(pair[0])));
+                assertEquals(pair[1], Bytes.toString(result.getValue(familyNameBytes, Bytes.toBytes(pair[0]))));
             }
             index++;
         }
-        assertEquals(data.length,index);
+        assertEquals(data.length, index);
     }
 
     @Test
     public void directHCatOutputFormatTest() throws Exception {
         String testName = "directHCatOutputFormatTest";
-        Path methodTestDir = new Path(getTestDir(),testName);
+        Path methodTestDir = new Path(getTestDir(), testName);
 
         String databaseName = testName;
-        String dbDir = new Path(methodTestDir,"DB_"+testName).toString();
+        String dbDir = new Path(methodTestDir, "DB_" + testName).toString();
         String tableName = newTableName(testName);
         String familyName = "my_family";
         byte[] familyNameBytes = Bytes.toBytes(familyName);
@@ -207,24 +206,24 @@
 
         String dbquery = "CREATE DATABASE IF NOT EXISTS " + databaseName + " LOCATION '" + dbDir + "'";
         String tableQuery = "CREATE TABLE " + databaseName + "." + tableName +
-                              "(key int, english string, spanish string) STORED BY " +
-                              "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'" +
-                              "TBLPROPERTIES (" +
-                              "'hbase.columns.mapping'=':key,"+familyName+":english,"+familyName+":spanish')" ;
+            "(key int, english string, spanish string) STORED BY " +
+            "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'" +
+            "TBLPROPERTIES (" +
+            "'hbase.columns.mapping'=':key," + familyName + ":english," + familyName + ":spanish')";
 
         assertEquals(0, hcatDriver.run(dbquery).getResponseCode());
         assertEquals(0, hcatDriver.run(tableQuery).getResponseCode());
 
         String data[] = {"1,english:ONE,spanish:UNO",
-                               "2,english:ONE,spanish:DOS",
-                               "3,english:ONE,spanish:TRES"};
+            "2,english:ONE,spanish:DOS",
+            "3,english:ONE,spanish:TRES"};
 
         // input/output settings
-        Path inputPath = new Path(methodTestDir,"mr_input");
+        Path inputPath = new Path(methodTestDir, "mr_input");
         getFileSystem().mkdirs(inputPath);
         //create multiple files so we can test with multiple mappers
-        for(int i=0;i<data.length;i++) {
-            FSDataOutputStream os = getFileSystem().create(new Path(inputPath,"inputFile"+i+".txt"));
+        for (int i = 0; i < data.length; i++) {
+            FSDataOutputStream os = getFileSystem().create(new Path(inputPath, "inputFile" + i + ".txt"));
             os.write(Bytes.toBytes(data[i] + "\n"));
             os.close();
         }
@@ -232,16 +231,16 @@
         //create job
         Path workingDir = new Path(methodTestDir, "mr_work");
         OutputJobInfo outputJobInfo = OutputJobInfo.create(databaseName,
-                tableName, null);
+            tableName, null);
         Job job = configureJob(testName, conf, workingDir, MapHCatWrite.class,
-                outputJobInfo, inputPath);
+            outputJobInfo, inputPath);
         assertTrue(job.waitForCompletion(true));
 
         RevisionManager rm = HBaseRevisionManagerUtil.getOpenedRevisionManager(conf);
         try {
             TableSnapshot snapshot = rm.createSnapshot(hbaseTableName);
-            for(String el: snapshot.getColumnFamilies()) {
-                assertEquals(1,snapshot.getRevision(el));
+            for (String el : snapshot.getColumnFamilies()) {
+                assertEquals(1, snapshot.getRevision(el));
             }
         } finally {
             rm.close();
@@ -252,18 +251,18 @@
         Scan scan = new Scan();
         scan.addFamily(familyNameBytes);
         ResultScanner scanner = table.getScanner(scan);
-        int index=0;
-        for(Result result: scanner) {
+        int index = 0;
+        for (Result result : scanner) {
             String vals[] = data[index].toString().split(",");
-            for(int i=1;i<vals.length;i++) {
+            for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
-                assertTrue(result.containsColumn(familyNameBytes,Bytes.toBytes(pair[0])));
-                assertEquals(pair[1],Bytes.toString(result.getValue(familyNameBytes,Bytes.toBytes(pair[0]))));
-                assertEquals(1l,result.getColumn(familyNameBytes,Bytes.toBytes(pair[0])).get(0).getTimestamp());
+                assertTrue(result.containsColumn(familyNameBytes, Bytes.toBytes(pair[0])));
+                assertEquals(pair[1], Bytes.toString(result.getValue(familyNameBytes, Bytes.toBytes(pair[0]))));
+                assertEquals(1l, result.getColumn(familyNameBytes, Bytes.toBytes(pair[0])).get(0).getTimestamp());
             }
             index++;
         }
-        assertEquals(data.length,index);
+        assertEquals(data.length, index);
     }
 
     @Test
@@ -283,36 +282,36 @@
         conf.set(HCatConstants.HCAT_KEY_HIVE_CONF, HCatUtil.serialize(allConf.getAllProperties()));
 
         String dbquery = "CREATE DATABASE IF NOT EXISTS " + databaseName + " LOCATION '" + dbDir
-                + "'";
+            + "'";
         String tableQuery = "CREATE TABLE " + databaseName + "." + tableName +
-                "(key int, english string, spanish string) STORED BY " +
-                "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'" +
-                "TBLPROPERTIES (" +
-                "'hbase.columns.mapping'=':key," + familyName + ":english," + familyName +
-                ":spanish','hbase.table.name'='"+ hbaseTableName +"')";
+            "(key int, english string, spanish string) STORED BY " +
+            "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'" +
+            "TBLPROPERTIES (" +
+            "'hbase.columns.mapping'=':key," + familyName + ":english," + familyName +
+            ":spanish','hbase.table.name'='" + hbaseTableName + "')";
 
         assertEquals(0, hcatDriver.run(dbquery).getResponseCode());
         assertEquals(0, hcatDriver.run(tableQuery).getResponseCode());
 
         String data[] = {"1,english:ONE,spanish:UNO",
-                "2,english:TWO,spanish:DOS",
-                "3,english:THREE,spanish:TRES"};
+            "2,english:TWO,spanish:DOS",
+            "3,english:THREE,spanish:TRES"};
 
         Path inputPath = new Path(methodTestDir, "mr_input");
         getFileSystem().mkdirs(inputPath);
         // create multiple files so we can test with multiple mappers
         for (int i = 0; i < data.length; i++) {
             FSDataOutputStream os = getFileSystem().create(
-                    new Path(inputPath, "inputFile" + i + ".txt"));
+                new Path(inputPath, "inputFile" + i + ".txt"));
             os.write(Bytes.toBytes(data[i] + "\n"));
             os.close();
         }
 
         Path workingDir = new Path(methodTestDir, "mr_abort");
         OutputJobInfo outputJobInfo = OutputJobInfo.create(databaseName,
-                tableName, null);
+            tableName, null);
         Job job = configureJob(testName, conf, workingDir, MapWriteAbortTransaction.class,
-                outputJobInfo, inputPath);
+            outputJobInfo, inputPath);
         assertFalse(job.waitForCompletion(true));
 
         // verify that revision manager has it as aborted transaction
@@ -322,7 +321,7 @@
             for (String family : snapshot.getColumnFamilies()) {
                 assertEquals(1, snapshot.getRevision(family));
                 List<FamilyRevision> abortedWriteTransactions = rm.getAbortedWriteTransactions(
-                        hbaseTableName, family);
+                    hbaseTableName, family);
                 assertEquals(1, abortedWriteTransactions.size());
                 assertEquals(1, abortedWriteTransactions.get(0).getRevision());
             }
@@ -339,15 +338,15 @@
         for (Result result : scanner) {
             String key = Bytes.toString(result.getRow());
             assertNotSame(MapWriteAbortTransaction.failedKey, key);
-            int index = Integer.parseInt(key)-1;
+            int index = Integer.parseInt(key) - 1;
             String vals[] = data[index].toString().split(",");
             for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
                 assertTrue(result.containsColumn(familyNameBytes, Bytes.toBytes(pair[0])));
                 assertEquals(pair[1],
-                        Bytes.toString(result.getValue(familyNameBytes, Bytes.toBytes(pair[0]))));
+                    Bytes.toString(result.getValue(familyNameBytes, Bytes.toBytes(pair[0]))));
                 assertEquals(1l, result.getColumn(familyNameBytes, Bytes.toBytes(pair[0])).get(0)
-                        .getTimestamp());
+                    .getTimestamp());
             }
             count++;
         }
@@ -355,7 +354,7 @@
 
         // verify that the inputformat returns empty results.
         Path outputDir = new Path(getTestDir(),
-                "mapred/testHBaseTableIgnoreAbortedTransactions");
+            "mapred/testHBaseTableIgnoreAbortedTransactions");
         FileSystem fs = getFileSystem();
         if (fs.exists(outputDir)) {
             fs.delete(outputDir, true);
@@ -365,7 +364,7 @@
         job.setMapperClass(MapReadAbortedTransaction.class);
         job.setInputFormatClass(HCatInputFormat.class);
         InputJobInfo inputJobInfo = InputJobInfo.create(databaseName,
-                tableName, null);
+            tableName, null);
         HCatInputFormat.setInput(job, inputJobInfo);
         job.setOutputFormatClass(TextOutputFormat.class);
         TextOutputFormat.setOutputPath(job, outputDir);
@@ -378,8 +377,8 @@
     }
 
     private Job configureJob(String jobName, Configuration conf,
-            Path workingDir, Class<? extends Mapper> mapperClass,
-            OutputJobInfo outputJobInfo, Path inputPath) throws IOException {
+                             Path workingDir, Class<? extends Mapper> mapperClass,
+                             OutputJobInfo outputJobInfo, Path inputPath) throws IOException {
         Job job = new Job(conf, jobName);
         job.setWorkingDirectory(workingDir);
         job.setJarByClass(this.getClass());
@@ -409,16 +408,16 @@
 
         @Override
         public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
-            OutputJobInfo jobInfo = (OutputJobInfo)HCatUtil.deserialize(context.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
+            OutputJobInfo jobInfo = (OutputJobInfo) HCatUtil.deserialize(context.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
             HCatRecord record = new DefaultHCatRecord(3);
             HCatSchema schema = jobInfo.getOutputSchema();
             String vals[] = value.toString().split(",");
-            record.setInteger("key",schema,Integer.parseInt(vals[0]));
-            for(int i=1;i<vals.length;i++) {
+            record.setInteger("key", schema, Integer.parseInt(vals[0]));
+            for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
-                record.set(pair[0],schema,pair[1]);
+                record.set(pair[0], schema, pair[1]);
             }
-            context.write(null,record);
+            context.write(null, record);
         }
     }
 
@@ -434,15 +433,15 @@
 
         @Override
         public void map(LongWritable key, Text value,
-                OutputCollector<BytesWritable, Put> output, Reporter reporter)
-                throws IOException {
+                        OutputCollector<BytesWritable, Put> output, Reporter reporter)
+            throws IOException {
             String vals[] = value.toString().split(",");
             Put put = new Put(Bytes.toBytes(vals[0]));
-            for(int i=1;i<vals.length;i++) {
+            for (int i = 1; i < vals.length; i++) {
                 String pair[] = vals[i].split(":");
                 put.add(Bytes.toBytes("my_family"),
-                        Bytes.toBytes(pair[0]),
-                        Bytes.toBytes(pair[1]));
+                    Bytes.toBytes(pair[0]),
+                    Bytes.toBytes(pair[1]));
             }
             output.collect(null, put);
         }
@@ -454,7 +453,7 @@
 
         @Override
         public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
-            OutputJobInfo jobInfo = (OutputJobInfo)HCatUtil.deserialize(context.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
+            OutputJobInfo jobInfo = (OutputJobInfo) HCatUtil.deserialize(context.getConfiguration().get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
             HCatRecord record = new DefaultHCatRecord(3);
             HCatSchema schema = jobInfo.getOutputSchema();
             String vals[] = value.toString().split(",");
@@ -477,18 +476,18 @@
     }
 
     static class MapReadAbortedTransaction
-            extends
-            Mapper<ImmutableBytesWritable, HCatRecord, WritableComparable<?>, Text> {
+        extends
+        Mapper<ImmutableBytesWritable, HCatRecord, WritableComparable<?>, Text> {
 
         @Override
         public void run(Context context) throws IOException,
-                InterruptedException {
+            InterruptedException {
             setup(context);
             if (context.nextKeyValue()) {
                 map(context.getCurrentKey(), context.getCurrentValue(), context);
                 while (context.nextKeyValue()) {
                     map(context.getCurrentKey(), context.getCurrentValue(),
-                            context);
+                        context);
                 }
                 throw new IOException("There should have been no records");
             }
@@ -497,7 +496,7 @@
 
         @Override
         public void map(ImmutableBytesWritable key, HCatRecord value,
-                Context context) throws IOException, InterruptedException {
+                        Context context) throws IOException, InterruptedException {
             System.out.println("HCat record value" + value.toString());
         }
     }
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseInputFormat.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseInputFormat.java
index 9dfbc84..78724a1 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseInputFormat.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseInputFormat.java
@@ -76,19 +76,19 @@
 
 public class TestHBaseInputFormat extends SkeletonHBaseTest {
 
-    private static HiveConf   hcatConf;
+    private static HiveConf hcatConf;
     private static HCatDriver hcatDriver;
-    private final byte[] FAMILY     = Bytes.toBytes("testFamily");
+    private final byte[] FAMILY = Bytes.toBytes("testFamily");
     private final byte[] QUALIFIER1 = Bytes.toBytes("testQualifier1");
     private final byte[] QUALIFIER2 = Bytes.toBytes("testQualifier2");
 
-   public TestHBaseInputFormat() throws Exception {
+    public TestHBaseInputFormat() throws Exception {
         hcatConf = getHiveConf();
         hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-                HCatSemanticAnalyzer.class.getName());
+            HCatSemanticAnalyzer.class.getName());
         URI fsuri = getFileSystem().getUri();
         Path whPath = new Path(fsuri.getScheme(), fsuri.getAuthority(),
-                getTestDir());
+            getTestDir());
         hcatConf.set(HiveConf.ConfVars.HADOOPFS.varname, fsuri.toString());
         hcatConf.set(ConfVars.METASTOREWAREHOUSE.varname, whPath.toString());
 
@@ -100,7 +100,7 @@
             }
         }
         HBaseConfiguration.merge(hcatConf,
-               RevisionManagerConfiguration.create());
+            RevisionManagerConfiguration.create());
 
 
         SessionState.start(new CliSessionState(hcatConf));
@@ -123,7 +123,7 @@
                 put.add(FAMILY, QUALIFIER2, i, Bytes.toBytes("textValue-" + i));
                 myPuts.add(put);
                 Transaction tsx = rm.beginWriteTransaction(tableName,
-                        columnFamilies);
+                    columnFamilies);
                 rm.commitWriteTransaction(tsx);
             }
         } finally {
@@ -134,14 +134,14 @@
         return myPuts;
     }
 
-   private void populateHBaseTable(String tName, int revisions) throws IOException {
+    private void populateHBaseTable(String tName, int revisions) throws IOException {
         List<Put> myPuts = generatePuts(revisions, tName);
         HTable table = new HTable(getHbaseConf(), Bytes.toBytes(tName));
         table.put(myPuts);
     }
 
     private long populateHBaseTableQualifier1(String tName, int value, Boolean commit)
-            throws IOException {
+        throws IOException {
         List<String> columnFamilies = Arrays.asList("testFamily");
         RevisionManager rm = null;
         List<Put> myPuts = new ArrayList<Put>();
@@ -154,7 +154,7 @@
             Put put = new Put(Bytes.toBytes("testRow"));
             revision = tsx.getRevisionNumber();
             put.add(FAMILY, QUALIFIER1, revision,
-                    Bytes.toBytes("textValue-" + value));
+                Bytes.toBytes("textValue-" + value));
             myPuts.add(put);
 
             // If commit is null it is left as a running transaction
@@ -183,11 +183,11 @@
         String db_dir = getTestDir() + "/hbasedb";
 
         String dbquery = "CREATE DATABASE IF NOT EXISTS " + databaseName + " LOCATION '"
-                            + db_dir + "'";
+            + db_dir + "'";
         String tableQuery = "CREATE TABLE " + databaseName + "." + tableName
-                              + "(key string, testqualifier1 string, testqualifier2 string) STORED BY " +
-                              "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
-                              + "TBLPROPERTIES ('hbase.columns.mapping'=':key,testFamily:testQualifier1,testFamily:testQualifier2')" ;
+            + "(key string, testqualifier1 string, testqualifier2 string) STORED BY " +
+            "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
+            + "TBLPROPERTIES ('hbase.columns.mapping'=':key,testFamily:testQualifier1,testFamily:testQualifier2')";
 
         CommandProcessorResponse responseOne = hcatDriver.run(dbquery);
         assertEquals(0, responseOne.getResponseCode());
@@ -201,7 +201,7 @@
         populateHBaseTable(hbaseTableName, 5);
         Configuration conf = new Configuration(hcatConf);
         conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
-                HCatUtil.serialize(getHiveConf().getAllProperties()));
+            HCatUtil.serialize(getHiveConf().getAllProperties()));
 
         // output settings
         Path outputDir = new Path(getTestDir(), "mapred/testHbaseTableMRRead");
@@ -217,7 +217,7 @@
 
         job.setInputFormatClass(HCatInputFormat.class);
         InputJobInfo inputJobInfo = InputJobInfo.create(databaseName, tableName,
-                null);
+            null);
         HCatInputFormat.setInput(job, inputJobInfo);
         job.setOutputFormatClass(TextOutputFormat.class);
         TextOutputFormat.setOutputPath(job, outputDir);
@@ -232,7 +232,7 @@
         assertFalse(MapReadHTable.error);
         assertEquals(MapReadHTable.count, 1);
 
-        String dropTableQuery = "DROP TABLE " + hbaseTableName ;
+        String dropTableQuery = "DROP TABLE " + hbaseTableName;
         CommandProcessorResponse responseThree = hcatDriver.run(dropTableQuery);
         assertEquals(0, responseThree.getResponseCode());
 
@@ -251,11 +251,11 @@
         //Table name as specified by hbase.table.name property
         String hbaseTableName = "MyDB_" + tableName;
         String tableQuery = "CREATE TABLE " + tableName
-                              + "(key string, testqualifier1 string, testqualifier2 string) STORED BY "
-                              + "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
-                              + "TBLPROPERTIES ('hbase.columns.mapping'="
-                              + "':key,testFamily:testQualifier1,testFamily:testQualifier2',"
-                              + "'hbase.table.name'='" + hbaseTableName+ "')" ;
+            + "(key string, testqualifier1 string, testqualifier2 string) STORED BY "
+            + "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
+            + "TBLPROPERTIES ('hbase.columns.mapping'="
+            + "':key,testFamily:testQualifier1,testFamily:testQualifier2',"
+            + "'hbase.table.name'='" + hbaseTableName + "')";
 
         CommandProcessorResponse responseTwo = hcatDriver.run(tableQuery);
         assertEquals(0, responseTwo.getResponseCode());
@@ -268,7 +268,7 @@
 
         Configuration conf = new Configuration(hcatConf);
         conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
-                HCatUtil.serialize(getHiveConf().getAllProperties()));
+            HCatUtil.serialize(getHiveConf().getAllProperties()));
 
         // output settings
         Path outputDir = new Path(getTestDir(), "mapred/testHBaseTableProjectionReadMR");
@@ -282,7 +282,7 @@
         job.setMapperClass(MapReadProjHTable.class);
         job.setInputFormatClass(HCatInputFormat.class);
         InputJobInfo inputJobInfo = InputJobInfo.create(
-                MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, null);
+            MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, null);
         HCatInputFormat.setOutputSchema(job, getProjectionSchema());
         HCatInputFormat.setInput(job, inputJobInfo);
         job.setOutputFormatClass(TextOutputFormat.class);
@@ -296,7 +296,7 @@
         assertFalse(MapReadProjHTable.error);
         assertEquals(MapReadProjHTable.count, 1);
 
-        String dropTableQuery = "DROP TABLE " + tableName ;
+        String dropTableQuery = "DROP TABLE " + tableName;
         CommandProcessorResponse responseThree = hcatDriver.run(dropTableQuery);
         assertEquals(0, responseThree.getResponseCode());
 
@@ -309,10 +309,10 @@
 
         String tableName = newTableName("mytable");
         String tableQuery = "CREATE TABLE " + tableName
-                              + "(key string, testqualifier1 string, testqualifier2 string) STORED BY " +
-                              "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
-                              + "TBLPROPERTIES ('hbase.columns.mapping'=':key," +
-                                    "testFamily:testQualifier1,testFamily:testQualifier2')" ;
+            + "(key string, testqualifier1 string, testqualifier2 string) STORED BY " +
+            "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
+            + "TBLPROPERTIES ('hbase.columns.mapping'=':key," +
+            "testFamily:testQualifier1,testFamily:testQualifier2')";
 
         CommandProcessorResponse responseTwo = hcatDriver.run(tableQuery);
         assertEquals(0, responseTwo.getResponseCode());
@@ -325,7 +325,7 @@
 
         Configuration conf = new Configuration(hcatConf);
         conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
-                HCatUtil.serialize(getHiveConf().getAllProperties()));
+            HCatUtil.serialize(getHiveConf().getAllProperties()));
 
         // output settings
         Path outputDir = new Path(getTestDir(), "mapred/testHBaseTableProjectionReadMR");
@@ -341,7 +341,7 @@
         job.setInputFormat(HBaseInputFormat.class);
 
         InputJobInfo inputJobInfo = InputJobInfo.create(
-                MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, null);
+            MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, null);
         //Configure projection schema
         job.set(HCatConstants.HCAT_KEY_OUTPUT_SCHEMA, HCatUtil.serialize(getProjectionSchema()));
         Job newJob = new Job(job);
@@ -369,7 +369,7 @@
         assertFalse(MapReadProjHTable.error);
         assertEquals(MapReadProjHTable.count, 1);
 
-        String dropTableQuery = "DROP TABLE " + tableName ;
+        String dropTableQuery = "DROP TABLE " + tableName;
         CommandProcessorResponse responseThree = hcatDriver.run(dropTableQuery);
         assertEquals(0, responseThree.getResponseCode());
 
@@ -381,10 +381,10 @@
     public void TestHBaseTableIgnoreAbortedTransactions() throws Exception {
         String tableName = newTableName("mytable");
         String tableQuery = "CREATE TABLE " + tableName
-                              + "(key string, testqualifier1 string, testqualifier2 string) STORED BY " +
-                              "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
-                              + "TBLPROPERTIES ('hbase.columns.mapping'=':key," +
-                                    "testFamily:testQualifier1,testFamily:testQualifier2')" ;
+            + "(key string, testqualifier1 string, testqualifier2 string) STORED BY " +
+            "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
+            + "TBLPROPERTIES ('hbase.columns.mapping'=':key," +
+            "testFamily:testQualifier1,testFamily:testQualifier2')";
 
         CommandProcessorResponse responseTwo = hcatDriver.run(tableQuery);
         assertEquals(0, responseTwo.getResponseCode());
@@ -399,7 +399,7 @@
 
         Configuration conf = new Configuration(hcatConf);
         conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
-                HCatUtil.serialize(getHiveConf().getAllProperties()));
+            HCatUtil.serialize(getHiveConf().getAllProperties()));
 
         Path outputDir = new Path(getTestDir(), "mapred/testHBaseTableIgnoreAbortedTransactions");
         FileSystem fs = getFileSystem();
@@ -412,7 +412,7 @@
         MapReadHTable.resetCounters();
         job.setInputFormatClass(HCatInputFormat.class);
         InputJobInfo inputJobInfo = InputJobInfo.create(
-                MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, null);
+            MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, null);
         HCatInputFormat.setInput(job, inputJobInfo);
         job.setOutputFormatClass(TextOutputFormat.class);
         TextOutputFormat.setOutputPath(job, outputDir);
@@ -428,7 +428,7 @@
         assertFalse(MapReadHTable.error);
         assertEquals(1, MapReadHTable.count);
 
-        String dropTableQuery = "DROP TABLE " + tableName ;
+        String dropTableQuery = "DROP TABLE " + tableName;
         CommandProcessorResponse responseThree = hcatDriver.run(dropTableQuery);
         assertEquals(0, responseThree.getResponseCode());
 
@@ -440,10 +440,10 @@
     public void TestHBaseTableIgnoreAbortedAndRunningTransactions() throws Exception {
         String tableName = newTableName("mytable");
         String tableQuery = "CREATE TABLE " + tableName
-                              + "(key string, testqualifier1 string, testqualifier2 string) STORED BY " +
-                              "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
-                              + "TBLPROPERTIES ('hbase.columns.mapping'=':key," +
-                                    "testFamily:testQualifier1,testFamily:testQualifier2')" ;
+            + "(key string, testqualifier1 string, testqualifier2 string) STORED BY " +
+            "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
+            + "TBLPROPERTIES ('hbase.columns.mapping'=':key," +
+            "testFamily:testQualifier1,testFamily:testQualifier2')";
 
         CommandProcessorResponse responseTwo = hcatDriver.run(tableQuery);
         assertEquals(0, responseTwo.getResponseCode());
@@ -462,7 +462,7 @@
 
         Configuration conf = new Configuration(hcatConf);
         conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
-                HCatUtil.serialize(getHiveConf().getAllProperties()));
+            HCatUtil.serialize(getHiveConf().getAllProperties()));
 
         Path outputDir = new Path(getTestDir(), "mapred/testHBaseTableIgnoreAbortedTransactions");
         FileSystem fs = getFileSystem();
@@ -474,7 +474,7 @@
         job.setMapperClass(MapReadHTableRunningAbort.class);
         job.setInputFormatClass(HCatInputFormat.class);
         InputJobInfo inputJobInfo = InputJobInfo.create(
-                MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, null);
+            MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, null);
         HCatInputFormat.setInput(job, inputJobInfo);
         job.setOutputFormatClass(TextOutputFormat.class);
         TextOutputFormat.setOutputPath(job, outputDir);
@@ -489,7 +489,7 @@
         assertFalse(MapReadHTableRunningAbort.error);
         assertEquals(1, MapReadHTableRunningAbort.count);
 
-        String dropTableQuery = "DROP TABLE " + tableName ;
+        String dropTableQuery = "DROP TABLE " + tableName;
         CommandProcessorResponse responseThree = hcatDriver.run(dropTableQuery);
         assertEquals(0, responseThree.getResponseCode());
 
@@ -499,20 +499,20 @@
 
 
     static class MapReadHTable
-            extends
-            Mapper<ImmutableBytesWritable, HCatRecord, WritableComparable<?>, Text> {
+        extends
+        Mapper<ImmutableBytesWritable, HCatRecord, WritableComparable<?>, Text> {
 
         static boolean error = false;
         static int count = 0;
 
         @Override
         public void map(ImmutableBytesWritable key, HCatRecord value,
-                Context context) throws IOException, InterruptedException {
+                        Context context) throws IOException, InterruptedException {
             System.out.println("HCat record value" + value.toString());
             boolean correctValues = (value.size() == 3)
-                    && (value.get(0).toString()).equalsIgnoreCase("testRow")
-                    && (value.get(1).toString()).equalsIgnoreCase("textValue-5")
-                    && (value.get(2).toString()).equalsIgnoreCase("textValue-5");
+                && (value.get(0).toString()).equalsIgnoreCase("testRow")
+                && (value.get(1).toString()).equalsIgnoreCase("textValue-5")
+                && (value.get(2).toString()).equalsIgnoreCase("textValue-5");
 
             if (correctValues == false) {
                 error = true;
@@ -527,18 +527,19 @@
     }
 
     static class MapReadProjHTable
-            extends
-            Mapper<ImmutableBytesWritable, HCatRecord, WritableComparable<?>, Text> {
+        extends
+        Mapper<ImmutableBytesWritable, HCatRecord, WritableComparable<?>, Text> {
 
         static boolean error = false;
         static int count = 0;
+
         @Override
         public void map(ImmutableBytesWritable key, HCatRecord value,
-                Context context) throws IOException, InterruptedException {
+                        Context context) throws IOException, InterruptedException {
             System.out.println("HCat record value" + value.toString());
             boolean correctValues = (value.size() == 2)
-                    && (value.get(0).toString()).equalsIgnoreCase("testRow")
-                    && (value.get(1).toString()).equalsIgnoreCase("textValue-5");
+                && (value.get(0).toString()).equalsIgnoreCase("testRow")
+                && (value.get(1).toString()).equalsIgnoreCase("textValue-5");
 
             if (correctValues == false) {
                 error = true;
@@ -548,7 +549,7 @@
     }
 
     static class MapReadProjectionHTable
-            implements org.apache.hadoop.mapred.Mapper<ImmutableBytesWritable, Result, WritableComparable<?>, Text> {
+        implements org.apache.hadoop.mapred.Mapper<ImmutableBytesWritable, Result, WritableComparable<?>, Text> {
 
         static boolean error = false;
         static int count = 0;
@@ -563,15 +564,15 @@
 
         @Override
         public void map(ImmutableBytesWritable key, Result result,
-                OutputCollector<WritableComparable<?>, Text> output, Reporter reporter)
-                throws IOException {
+                        OutputCollector<WritableComparable<?>, Text> output, Reporter reporter)
+            throws IOException {
             System.out.println("Result " + result.toString());
             List<KeyValue> list = result.list();
             boolean correctValues = (list.size() == 1)
-                    && (Bytes.toString(list.get(0).getRow())).equalsIgnoreCase("testRow")
-                    && (Bytes.toString(list.get(0).getValue())).equalsIgnoreCase("textValue-5")
-                    && (Bytes.toString(list.get(0).getFamily())).equalsIgnoreCase("testFamily")
-                    && (Bytes.toString(list.get(0).getQualifier())).equalsIgnoreCase("testQualifier1");
+                && (Bytes.toString(list.get(0).getRow())).equalsIgnoreCase("testRow")
+                && (Bytes.toString(list.get(0).getValue())).equalsIgnoreCase("textValue-5")
+                && (Bytes.toString(list.get(0).getFamily())).equalsIgnoreCase("testFamily")
+                && (Bytes.toString(list.get(0).getQualifier())).equalsIgnoreCase("testQualifier1");
 
             if (correctValues == false) {
                 error = true;
@@ -581,20 +582,20 @@
     }
 
     static class MapReadHTableRunningAbort
-            extends
-            Mapper<ImmutableBytesWritable, HCatRecord, WritableComparable<?>, Text> {
+        extends
+        Mapper<ImmutableBytesWritable, HCatRecord, WritableComparable<?>, Text> {
 
         static boolean error = false;
         static int count = 0;
 
         @Override
         public void map(ImmutableBytesWritable key, HCatRecord value,
-                Context context) throws IOException, InterruptedException {
+                        Context context) throws IOException, InterruptedException {
             System.out.println("HCat record value" + value.toString());
             boolean correctValues = (value.size() == 3)
-                    && (value.get(0).toString()).equalsIgnoreCase("testRow")
-                    && (value.get(1).toString()).equalsIgnoreCase("textValue-3")
-                    && (value.get(2).toString()).equalsIgnoreCase("textValue-2");
+                && (value.get(0).toString()).equalsIgnoreCase("testRow")
+                && (value.get(1).toString()).equalsIgnoreCase("textValue-3")
+                && (value.get(2).toString()).equalsIgnoreCase("textValue-2");
 
             if (correctValues == false) {
                 error = true;
@@ -607,9 +608,9 @@
 
         HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
         schema.append(new HCatFieldSchema("key", HCatFieldSchema.Type.STRING,
-                ""));
+            ""));
         schema.append(new HCatFieldSchema("testqualifier1",
-                HCatFieldSchema.Type.STRING, ""));
+            HCatFieldSchema.Type.STRING, ""));
         return schema;
     }
 
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestSnapshots.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestSnapshots.java
index cb6e40a..e07bf46 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestSnapshots.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestSnapshots.java
@@ -42,16 +42,16 @@
 import org.junit.Test;
 
 public class TestSnapshots extends SkeletonHBaseTest {
-    private static HiveConf   hcatConf;
+    private static HiveConf hcatConf;
     private static HCatDriver hcatDriver;
 
     public void Initialize() throws Exception {
         hcatConf = getHiveConf();
         hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-                HCatSemanticAnalyzer.class.getName());
+            HCatSemanticAnalyzer.class.getName());
         URI fsuri = getFileSystem().getUri();
         Path whPath = new Path(fsuri.getScheme(), fsuri.getAuthority(),
-                getTestDir());
+            getTestDir());
         hcatConf.set(HiveConf.ConfVars.HADOOPFS.varname, fsuri.toString());
         hcatConf.set(ConfVars.METASTOREWAREHOUSE.varname, whPath.toString());
 
@@ -69,18 +69,18 @@
     }
 
     @Test
-    public void TestSnapshotConversion() throws Exception{
+    public void TestSnapshotConversion() throws Exception {
         Initialize();
         String tableName = newTableName("mytableOne");
         String databaseName = newTableName("mydatabase");
         String fullyQualTableName = databaseName + "." + tableName;
         String db_dir = getTestDir() + "/hbasedb";
         String dbquery = "CREATE DATABASE IF NOT EXISTS " + databaseName + " LOCATION '"
-                            + db_dir + "'";
+            + db_dir + "'";
         String tableQuery = "CREATE TABLE " + fullyQualTableName
-                              + "(key string, value1 string, value2 string) STORED BY " +
-                              "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
-                              + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:q1,cf2:q2')" ;
+            + "(key string, value1 string, value2 string) STORED BY " +
+            "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
+            + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:q1,cf2:q2')";
 
         CommandProcessorResponse cmdResponse = hcatDriver.run(dbquery);
         assertEquals(0, cmdResponse.getResponseCode());
@@ -90,7 +90,7 @@
         InputJobInfo inputInfo = InputJobInfo.create(databaseName, tableName, null);
         Configuration conf = new Configuration(hcatConf);
         conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
-                HCatUtil.serialize(getHiveConf().getAllProperties()));
+            HCatUtil.serialize(getHiveConf().getAllProperties()));
         Job job = new Job(conf);
         inputInfo.getProperties().setProperty(HBaseConstants.PROPERTY_TABLE_SNAPSHOT_KEY, "dummysnapshot");
         InitializeInput.setInput(job, inputInfo);
@@ -100,7 +100,7 @@
         Map<String, Long> revMap = new HashMap<String, Long>();
         revMap.put("cf1", 3L);
         revMap.put("cf2", 5L);
-        TableSnapshot hbaseSnapshot = new TableSnapshot(fullyQualTableName, revMap,-1);
+        TableSnapshot hbaseSnapshot = new TableSnapshot(fullyQualTableName, revMap, -1);
         HCatTableSnapshot hcatSnapshot = HBaseRevisionManagerUtil.convertSnapshot(hbaseSnapshot, inputInfo.getTableInfo());
 
         assertEquals(hcatSnapshot.getRevision("value1"), 3);
@@ -113,9 +113,9 @@
         tableName = newTableName("mytableTwo");
         fullyQualTableName = databaseName + "." + tableName;
         tableQuery = "CREATE TABLE " + fullyQualTableName
-        + "(key string, value1 string, value2 string) STORED BY " +
-        "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
-        + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:q1,cf1:q2')" ;
+            + "(key string, value1 string, value2 string) STORED BY " +
+            "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
+            + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:q1,cf1:q2')";
         cmdResponse = hcatDriver.run(tableQuery);
         assertEquals(0, cmdResponse.getResponseCode());
         revMap.clear();
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestIDGenerator.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestIDGenerator.java
index 3f9498c..2a8fa8f 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestIDGenerator.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestIDGenerator.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hcatalog.hbase.snapshot;
+
 import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
@@ -26,7 +27,7 @@
 import org.junit.Assert;
 import org.junit.Test;
 
-public class TestIDGenerator extends SkeletonHBaseTest{
+public class TestIDGenerator extends SkeletonHBaseTest {
 
     @Test
     public void testIDGeneration() throws Exception {
@@ -35,7 +36,7 @@
         String servers = getHbaseConf().get("hbase.zookeeper.quorum");
         String[] splits = servers.split(",");
         StringBuffer sb = new StringBuffer();
-        for(String split : splits){
+        for (String split : splits) {
             sb.append(split);
             sb.append(':');
             sb.append(port);
@@ -44,20 +45,20 @@
 
         String tableName = "myTable";
         long initId = zkutil.nextId(tableName);
-        for (int i=0; i<10; i++) {
+        for (int i = 0; i < 10; i++) {
             long id = zkutil.nextId(tableName);
             Assert.assertEquals(initId + (i + 1), id);
         }
     }
 
     @Test
-    public void testMultipleClients() throws InterruptedException{
+    public void testMultipleClients() throws InterruptedException {
 
         int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
         String servers = getHbaseConf().get("hbase.zookeeper.quorum");
         String[] splits = servers.split(",");
         StringBuffer sb = new StringBuffer();
-        for(String split : splits){
+        for (String split : splits) {
             sb.append(split);
             sb.append(':');
             sb.append(port);
@@ -65,30 +66,30 @@
 
         ArrayList<IDGenClient> clients = new ArrayList<IDGenClient>();
 
-        for(int i =0; i < 5; i++){
+        for (int i = 0; i < 5; i++) {
             IDGenClient idClient = new IDGenClient(sb.toString(), "/rm_base", 10, "testTable");
             clients.add(idClient);
         }
 
-        for(IDGenClient idClient : clients){
+        for (IDGenClient idClient : clients) {
             idClient.run();
         }
 
-        for(IDGenClient idClient : clients){
+        for (IDGenClient idClient : clients) {
             idClient.join();
         }
 
         HashMap<Long, Long> idMap = new HashMap<Long, Long>();
-        for(IDGenClient idClient : clients){
+        for (IDGenClient idClient : clients) {
             idMap.putAll(idClient.getIdMap());
         }
 
         ArrayList<Long> keys = new ArrayList<Long>(idMap.keySet());
         Collections.sort(keys);
         int startId = 1;
-        for(Long key: keys){
+        for (Long key : keys) {
             Long id = idMap.get(key);
-            System.out.println("Key: " + key + " Value "+ id);
+            System.out.println("Key: " + key + " Value " + id);
             assertTrue(id == startId);
             startId++;
 
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManager.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManager.java
index 884ab30..bd0ee03 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManager.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManager.java
@@ -33,16 +33,16 @@
 import org.apache.zookeeper.data.Stat;
 import org.junit.Test;
 
-public class TestRevisionManager extends SkeletonHBaseTest{
+public class TestRevisionManager extends SkeletonHBaseTest {
 
     @Test
-    public void testBasicZNodeCreation() throws IOException, KeeperException, InterruptedException{
+    public void testBasicZNodeCreation() throws IOException, KeeperException, InterruptedException {
 
         int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
         String servers = getHbaseConf().get("hbase.zookeeper.quorum");
         String[] splits = servers.split(",");
         StringBuffer sb = new StringBuffer();
-        for(String split : splits){
+        for (String split : splits) {
             sb.append(split);
             sb.append(':');
             sb.append(port);
@@ -64,7 +64,7 @@
         Stat result = zk.exists(transactionDataTablePath, false);
         assertTrue(result != null);
 
-        for(String colFamiliy : columnFamilies){
+        for (String colFamiliy : columnFamilies) {
             String cfPath = transactionDataTablePath + "/" + colFamiliy;
             Stat resultTwo = zk.exists(cfPath, false);
             assertTrue(resultTwo != null);
@@ -73,13 +73,13 @@
     }
 
     @Test
-    public void testCommitTransaction() throws IOException{
+    public void testCommitTransaction() throws IOException {
 
         int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
         String servers = getHbaseConf().get("hbase.zookeeper.quorum");
         String[] splits = servers.split(",");
         StringBuffer sb = new StringBuffer();
-        for(String split : splits){
+        for (String split : splits) {
             sb.append(split);
             sb.append(':');
             sb.append(port);
@@ -87,7 +87,7 @@
 
         Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
         conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
-        ZKBasedRevisionManager  manager = new ZKBasedRevisionManager();
+        ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
         manager.initialize(conf);
         manager.open();
         ZKUtil zkutil = new ZKUtil(sb.toString(), "/rm_base");
@@ -95,15 +95,15 @@
         String tableName = newTableName("testTable");
         List<String> columnFamilies = Arrays.asList("cf1", "cf2", "cf3");
         Transaction txn = manager.beginWriteTransaction(tableName,
-                columnFamilies);
+            columnFamilies);
 
         List<String> cfs = zkutil.getColumnFamiliesOfTable(tableName);
         assertTrue(cfs.size() == columnFamilies.size());
-        for (String cf : cfs){
+        for (String cf : cfs) {
             assertTrue(columnFamilies.contains(cf));
         }
 
-        for(String colFamily : columnFamilies){
+        for (String colFamily : columnFamilies) {
             String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamily);
             byte[] data = zkutil.getRawData(path, null);
             StoreFamilyRevisionList list = new StoreFamilyRevisionList();
@@ -115,7 +115,7 @@
 
         }
         manager.commitWriteTransaction(txn);
-        for(String colFamiliy : columnFamilies){
+        for (String colFamiliy : columnFamilies) {
             String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamiliy);
             byte[] data = zkutil.getRawData(path, null);
             StoreFamilyRevisionList list = new StoreFamilyRevisionList();
@@ -128,13 +128,13 @@
     }
 
     @Test
-    public void testAbortTransaction() throws IOException{
+    public void testAbortTransaction() throws IOException {
 
         int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
         String host = getHbaseConf().get("hbase.zookeeper.quorum");
         Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
         conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
-        ZKBasedRevisionManager  manager = new ZKBasedRevisionManager();
+        ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
         manager.initialize(conf);
         manager.open();
         ZKUtil zkutil = new ZKUtil(host + ':' + port, "/rm_base");
@@ -145,12 +145,12 @@
         List<String> cfs = zkutil.getColumnFamiliesOfTable(tableName);
 
         assertTrue(cfs.size() == columnFamilies.size());
-        for (String cf : cfs){
+        for (String cf : cfs) {
             assertTrue(columnFamilies.contains(cf));
         }
 
-        for(String colFamiliy : columnFamilies){
-            String path = PathUtil.getRunningTxnInfoPath("/rm_base",tableName, colFamiliy);
+        for (String colFamiliy : columnFamilies) {
+            String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamiliy);
             byte[] data = zkutil.getRawData(path, null);
             StoreFamilyRevisionList list = new StoreFamilyRevisionList();
             ZKUtil.deserialize(list, data);
@@ -161,8 +161,8 @@
 
         }
         manager.abortWriteTransaction(txn);
-        for(String colFamiliy : columnFamilies){
-            String path = PathUtil.getRunningTxnInfoPath("/rm_base",tableName, colFamiliy);
+        for (String colFamiliy : columnFamilies) {
+            String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamiliy);
             byte[] data = zkutil.getRawData(path, null);
             StoreFamilyRevisionList list = new StoreFamilyRevisionList();
             ZKUtil.deserialize(list, data);
@@ -170,8 +170,8 @@
 
         }
 
-        for(String colFamiliy : columnFamilies){
-            String path = PathUtil.getAbortInformationPath("/rm_base",tableName, colFamiliy);
+        for (String colFamiliy : columnFamilies) {
+            String path = PathUtil.getAbortInformationPath("/rm_base", tableName, colFamiliy);
             byte[] data = zkutil.getRawData(path, null);
             StoreFamilyRevisionList list = new StoreFamilyRevisionList();
             ZKUtil.deserialize(list, data);
@@ -189,7 +189,7 @@
         String servers = getHbaseConf().get("hbase.zookeeper.quorum");
         String[] splits = servers.split(",");
         StringBuffer sb = new StringBuffer();
-        for(String split : splits){
+        for (String split : splits) {
             sb.append(split);
             sb.append(':');
             sb.append(port);
@@ -197,31 +197,31 @@
 
         Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
         conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
-        ZKBasedRevisionManager  manager = new ZKBasedRevisionManager();
+        ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
         manager.initialize(conf);
         manager.open();
         String tableName = newTableName("testTable");
         List<String> columnFamilies = Arrays.asList("cf1", "cf2");
         Transaction txn = manager.beginWriteTransaction(tableName,
-                columnFamilies, 40);
+            columnFamilies, 40);
         Thread.sleep(100);
         try {
             manager.commitWriteTransaction(txn);
         } catch (Exception e) {
             assertTrue(e instanceof IOException);
             assertEquals(e.getMessage(),
-                    "The transaction to be removed not found in the data.");
+                "The transaction to be removed not found in the data.");
         }
 
     }
 
     @Test
-    public void testCreateSnapshot() throws IOException{
+    public void testCreateSnapshot() throws IOException {
         int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
         String host = getHbaseConf().get("hbase.zookeeper.quorum");
         Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
         conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
-        ZKBasedRevisionManager  manager = new ZKBasedRevisionManager();
+        ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
         manager.initialize(conf);
         manager.open();
         String tableName = newTableName("testTable");
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerEndpoint.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerEndpoint.java
index 13c2f45..b910195 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerEndpoint.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerEndpoint.java
@@ -34,172 +34,172 @@
 
 public class TestRevisionManagerEndpoint extends SkeletonHBaseTest {
 
-  static {
-    // test case specific mini cluster settings
-    testConf = new Configuration(false);
-    testConf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
-        "org.apache.hcatalog.hbase.snapshot.RevisionManagerEndpoint",
-        "org.apache.hadoop.hbase.coprocessor.GenericEndpoint");
-    testConf.set(RMConstants.REVISION_MGR_ENDPOINT_IMPL_CLASS, MockRM.class.getName());
-  }
+    static {
+        // test case specific mini cluster settings
+        testConf = new Configuration(false);
+        testConf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+            "org.apache.hcatalog.hbase.snapshot.RevisionManagerEndpoint",
+            "org.apache.hadoop.hbase.coprocessor.GenericEndpoint");
+        testConf.set(RMConstants.REVISION_MGR_ENDPOINT_IMPL_CLASS, MockRM.class.getName());
+    }
 
-  /**
-   * Mock implementation to test the protocol/serialization
-   */
-  public static class MockRM implements RevisionManager {
+    /**
+     * Mock implementation to test the protocol/serialization
+     */
+    public static class MockRM implements RevisionManager {
 
-    private static class Invocation {
-      Invocation(String methodName, Object ret, Object... args) {
-          this.methodName = methodName;
-          this.args = args;
-          this.ret = ret;
-      }
+        private static class Invocation {
+            Invocation(String methodName, Object ret, Object... args) {
+                this.methodName = methodName;
+                this.args = args;
+                this.ret = ret;
+            }
 
-      String methodName;
-      Object[] args;
-      Object ret;
+            String methodName;
+            Object[] args;
+            Object ret;
 
-      private static boolean equals(Object obj1, Object obj2) {
-        if (obj1 == obj2) return true;
-        if (obj1 == null || obj2 == null) return false;
-        if (obj1 instanceof Transaction || obj1 instanceof TableSnapshot) {
-          return obj1.toString().equals(obj2.toString());
+            private static boolean equals(Object obj1, Object obj2) {
+                if (obj1 == obj2) return true;
+                if (obj1 == null || obj2 == null) return false;
+                if (obj1 instanceof Transaction || obj1 instanceof TableSnapshot) {
+                    return obj1.toString().equals(obj2.toString());
+                }
+                return obj1.equals(obj2);
+            }
+
+            @Override
+            public boolean equals(Object obj) {
+                Invocation other = (Invocation) obj;
+                if (this == other) return true;
+                if (other == null) return false;
+                if (this.args != other.args) {
+                    if (this.args == null || other.args == null) return false;
+                    if (this.args.length != other.args.length) return false;
+                    for (int i = 0; i < args.length; i++) {
+                        if (!equals(this.args[i], other.args[i])) return false;
+                    }
+                }
+                return equals(this.ret, other.ret);
+            }
+
+            @Override
+            public String toString() {
+                return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).
+                    append("method", this.methodName).
+                    append("args", this.args).
+                    append("returns", this.ret).
+                    toString();
+            }
         }
-        return obj1.equals(obj2);
-      }
 
-      @Override
-      public boolean equals(Object obj) {
-        Invocation other = (Invocation)obj;
-        if (this == other) return true;
-        if (other == null) return false;
-        if (this.args != other.args) {
-          if (this.args == null || other.args == null) return false;
-          if (this.args.length != other.args.length) return false;
-          for (int i=0; i<args.length; i++) {
-            if (!equals(this.args[i], other.args[i])) return false;
-          }
+        final static String DEFAULT_INSTANCE = "default";
+        final static Map<String, MockRM> INSTANCES = new ConcurrentHashMap<String, MockRM>();
+        Invocation lastCall;
+        boolean isOpen = false;
+
+        private <T extends Object> T recordCall(T result, Object... args) {
+            StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
+            lastCall = new Invocation(stackTrace[2].getMethodName(), result, args);
+            return result;
         }
-        return equals(this.ret, other.ret);
-      }
 
-      @Override
-      public String toString() {
-        return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).
-            append("method", this.methodName).
-            append("args", this.args).
-            append("returns", this.ret).
-            toString();
-      }
+        @Override
+        public void initialize(Configuration conf) {
+            if (!INSTANCES.containsKey(DEFAULT_INSTANCE))
+                INSTANCES.put(DEFAULT_INSTANCE, this);
+        }
+
+        @Override
+        public void open() throws IOException {
+            isOpen = true;
+        }
+
+        @Override
+        public void close() throws IOException {
+            isOpen = false;
+        }
+
+        @Override
+        public void createTable(String table, List<String> columnFamilies) throws IOException {
+        }
+
+        @Override
+        public void dropTable(String table) throws IOException {
+        }
+
+        @Override
+        public Transaction beginWriteTransaction(String table,
+                                                 List<String> families) throws IOException {
+            return recordCall(null, table, families);
+        }
+
+        @Override
+        public Transaction beginWriteTransaction(String table,
+                                                 List<String> families, long keepAlive) throws IOException {
+            return recordCall(null, table, families, keepAlive);
+        }
+
+        @Override
+        public void commitWriteTransaction(Transaction transaction)
+            throws IOException {
+        }
+
+        @Override
+        public void abortWriteTransaction(Transaction transaction)
+            throws IOException {
+        }
+
+        @Override
+        public List<FamilyRevision> getAbortedWriteTransactions(String table,
+                                                                String columnFamily) throws IOException {
+            return null;
+        }
+
+        @Override
+        public TableSnapshot createSnapshot(String tableName)
+            throws IOException {
+            return null;
+        }
+
+        @Override
+        public TableSnapshot createSnapshot(String tableName, long revision)
+            throws IOException {
+            TableSnapshot ret = new TableSnapshot(tableName, new HashMap<String, Long>(), revision);
+            return recordCall(ret, tableName, revision);
+        }
+
+        @Override
+        public void keepAlive(Transaction transaction) throws IOException {
+            recordCall(null, transaction);
+        }
     }
 
-    final static String DEFAULT_INSTANCE = "default";
-    final static Map<String, MockRM> INSTANCES = new ConcurrentHashMap<String, MockRM>();
-    Invocation lastCall;
-    boolean isOpen = false;
+    @Test
+    public void testRevisionManagerProtocol() throws Throwable {
 
-    private <T extends Object> T recordCall(T result, Object...args) {
-      StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
-      lastCall = new Invocation(stackTrace[2].getMethodName(), result, args);
-      return result;
+        Configuration conf = getHbaseConf();
+        RevisionManager rm = RevisionManagerFactory.getOpenedRevisionManager(
+            RevisionManagerEndpointClient.class.getName(), conf);
+
+        MockRM mockImpl = MockRM.INSTANCES.get(MockRM.DEFAULT_INSTANCE);
+        Assert.assertNotNull(mockImpl);
+        Assert.assertTrue(mockImpl.isOpen);
+
+        Transaction t = new Transaction("t1", Arrays.asList("f1", "f2"), 0, 0);
+        MockRM.Invocation call = new MockRM.Invocation("keepAlive", null, t);
+        rm.keepAlive(t);
+        Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
+
+        t = new Transaction("t2", Arrays.asList("f21", "f22"), 0, 0);
+        call = new MockRM.Invocation("beginWriteTransaction", null, t.getTableName(), t.getColumnFamilies());
+        call.ret = rm.beginWriteTransaction(t.getTableName(), t.getColumnFamilies());
+        Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
+
+        call = new MockRM.Invocation("createSnapshot", null, "t3", 1L);
+        call.ret = rm.createSnapshot("t3", 1);
+        Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
+
     }
 
-    @Override
-    public void initialize(Configuration conf) {
-      if(!INSTANCES.containsKey(DEFAULT_INSTANCE))
-        INSTANCES.put(DEFAULT_INSTANCE, this);
-    }
-
-    @Override
-    public void open() throws IOException {
-      isOpen = true;
-    }
-
-    @Override
-    public void close() throws IOException {
-      isOpen = false;
-    }
-
-    @Override
-    public void createTable(String table, List<String> columnFamilies) throws IOException {
-    }
-
-    @Override
-    public void dropTable(String table) throws IOException {
-    }
-
-    @Override
-    public Transaction beginWriteTransaction(String table,
-        List<String> families) throws IOException {
-      return recordCall(null, table, families);
-    }
-
-    @Override
-    public Transaction beginWriteTransaction(String table,
-      List<String> families, long keepAlive) throws IOException {
-      return recordCall(null, table, families, keepAlive);
-    }
-
-    @Override
-    public void commitWriteTransaction(Transaction transaction)
-        throws IOException {
-    }
-
-    @Override
-    public void abortWriteTransaction(Transaction transaction)
-        throws IOException {
-    }
-
-    @Override
-    public List<FamilyRevision> getAbortedWriteTransactions(String table,
-        String columnFamily) throws IOException {
-      return null;
-    }
-
-    @Override
-    public TableSnapshot createSnapshot(String tableName)
-        throws IOException {
-      return null;
-    }
-
-    @Override
-    public TableSnapshot createSnapshot(String tableName, long revision)
-        throws IOException {
-      TableSnapshot ret = new TableSnapshot(tableName, new HashMap<String, Long>(), revision);
-      return recordCall(ret, tableName, revision);
-    }
-
-    @Override
-    public void keepAlive(Transaction transaction) throws IOException {
-      recordCall(null, transaction);
-    }
-  }
-
-  @Test
-  public void testRevisionManagerProtocol() throws Throwable {
-
-    Configuration conf = getHbaseConf();
-    RevisionManager rm = RevisionManagerFactory.getOpenedRevisionManager(
-        RevisionManagerEndpointClient.class.getName(), conf);
-
-    MockRM mockImpl = MockRM.INSTANCES.get(MockRM.DEFAULT_INSTANCE);
-    Assert.assertNotNull(mockImpl);
-    Assert.assertTrue(mockImpl.isOpen);
-
-    Transaction t = new Transaction("t1", Arrays.asList("f1", "f2"), 0, 0);
-    MockRM.Invocation call = new MockRM.Invocation("keepAlive", null, t);
-    rm.keepAlive(t);
-    Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
-
-    t = new Transaction("t2", Arrays.asList("f21", "f22"), 0, 0);
-    call = new MockRM.Invocation("beginWriteTransaction", null, t.getTableName(),  t.getColumnFamilies());
-    call.ret = rm.beginWriteTransaction(t.getTableName(), t.getColumnFamilies());
-    Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
-
-    call = new MockRM.Invocation("createSnapshot", null, "t3", 1L);
-    call.ret = rm.createSnapshot("t3", 1);
-    Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
-
-  }
-
 }
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestThriftSerialization.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestThriftSerialization.java
index c3fa1d8..b1368e6 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestThriftSerialization.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestThriftSerialization.java
@@ -23,6 +23,7 @@
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
+
 import org.apache.hcatalog.hbase.snapshot.transaction.thrift.StoreFamilyRevision;
 import org.apache.hcatalog.hbase.snapshot.transaction.thrift.StoreFamilyRevisionList;
 import org.junit.Test;
@@ -30,8 +31,8 @@
 public class TestThriftSerialization {
 
     @Test
-    public void testLightWeightTransaction(){
-       StoreFamilyRevision trxn = new StoreFamilyRevision(0, 1000);
+    public void testLightWeightTransaction() {
+        StoreFamilyRevision trxn = new StoreFamilyRevision(0, 1000);
         try {
 
             byte[] data = ZKUtil.serialize(trxn);
@@ -47,11 +48,11 @@
     }
 
     @Test
-    public void testWriteTransactionList(){
+    public void testWriteTransactionList() {
         List<StoreFamilyRevision> txnList = new ArrayList<StoreFamilyRevision>();
         long version;
         long timestamp;
-        for( int i = 0; i < 10; i++){
+        for (int i = 0; i < 10; i++) {
             version = i;
             timestamp = 1000 + i;
             StoreFamilyRevision wtx = new StoreFamilyRevision(version, timestamp);
@@ -68,9 +69,9 @@
 
             Iterator<StoreFamilyRevision> itr = newList.getRevisionListIterator();
             int i = 0;
-            while(itr.hasNext()){
+            while (itr.hasNext()) {
                 StoreFamilyRevision txn = itr.next();
-                assertTrue(txn.getRevision() ==  i);
+                assertTrue(txn.getRevision() == i);
                 assertTrue(txn.getTimestamp() == (i + 1000));
                 i++;
             }
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestZNodeSetUp.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestZNodeSetUp.java
index ea3b1db..4f3a47f 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestZNodeSetUp.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestZNodeSetUp.java
@@ -40,19 +40,19 @@
 import org.junit.Test;
 
 
-public class TestZNodeSetUp extends SkeletonHBaseTest{
+public class TestZNodeSetUp extends SkeletonHBaseTest {
 
-    private static HiveConf   hcatConf;
+    private static HiveConf hcatConf;
     private static HCatDriver hcatDriver;
 
     public void Initialize() throws Exception {
 
         hcatConf = getHiveConf();
         hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-                HCatSemanticAnalyzer.class.getName());
+            HCatSemanticAnalyzer.class.getName());
         URI fsuri = getFileSystem().getUri();
         Path whPath = new Path(fsuri.getScheme(), fsuri.getAuthority(),
-                getTestDir());
+            getTestDir());
         hcatConf.set(HiveConf.ConfVars.HADOOPFS.varname, fsuri.toString());
         hcatConf.set(ConfVars.METASTOREWAREHOUSE.varname, whPath.toString());
 
@@ -64,7 +64,7 @@
             }
         }
         HBaseConfiguration.merge(hcatConf,
-                RevisionManagerConfiguration.create());
+            RevisionManagerConfiguration.create());
         hcatConf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
         SessionState.start(new CliSessionState(hcatConf));
         hcatDriver = new HCatDriver();
@@ -72,14 +72,14 @@
     }
 
     @Test
-    public void testBasicZNodeCreation() throws Exception{
+    public void testBasicZNodeCreation() throws Exception {
 
         Initialize();
         int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
         String servers = getHbaseConf().get("hbase.zookeeper.quorum");
         String[] splits = servers.split(",");
         StringBuffer sb = new StringBuffer();
-        for(String split : splits){
+        for (String split : splits) {
             sb.append(split);
             sb.append(':');
             sb.append(port);
@@ -87,9 +87,9 @@
 
         hcatDriver.run("drop table test_table");
         CommandProcessorResponse response = hcatDriver
-                .run("create table test_table(key int, value string) STORED BY " +
-                     "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
-                    + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:val')");
+            .run("create table test_table(key int, value string) STORED BY " +
+                "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
+                + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:val')");
 
         assertEquals(0, response.getResponseCode());
 
diff --git a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/WriteLockTest.java b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/WriteLockTest.java
index 4f1717b..7720225 100644
--- a/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/WriteLockTest.java
+++ b/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/WriteLockTest.java
@@ -57,6 +57,7 @@
         }
 
     }
+
     protected void runTest(int count) throws Exception {
         nodes = new WriteLock[count];
         for (int i = 0; i < count; i++) {
@@ -85,21 +86,21 @@
 
         if (count > 1) {
             if (killLeader) {
-            System.out.println("Now killing the leader");
-            // now lets kill the leader
-            latch = new CountDownLatch(1);
-            first.unlock();
-            latch.await(30, TimeUnit.SECONDS);
-            //Thread.sleep(10000);
-            WriteLock second = nodes[1];
-            dumpNodes(count);
-            // lets assert that the first election is the leader
-            Assert.assertTrue("The second znode should be the leader " + second.getId(), second.isOwner());
+                System.out.println("Now killing the leader");
+                // now lets kill the leader
+                latch = new CountDownLatch(1);
+                first.unlock();
+                latch.await(30, TimeUnit.SECONDS);
+                //Thread.sleep(10000);
+                WriteLock second = nodes[1];
+                dumpNodes(count);
+                // lets assert that the first election is the leader
+                Assert.assertTrue("The second znode should be the leader " + second.getId(), second.isOwner());
 
-            for (int i = 2; i < count; i++) {
-                WriteLock node = nodes[i];
-                Assert.assertFalse("Node should not be the leader " + node.getId(), node.isOwner());
-            }
+                for (int i = 2; i < count; i++) {
+                    WriteLock node = nodes[i];
+                    Assert.assertFalse("Node should not be the leader " + node.getId(), node.isOwner());
+                }
             }
 
 
@@ -130,7 +131,7 @@
         for (int i = 0; i < count; i++) {
             WriteLock node = nodes[i];
             System.out.println("node: " + i + " id: " +
-                    node.getId() + " is leader: " + node.isOwner());
+                node.getId() + " is leader: " + node.isOwner());
         }
     }
 
diff --git a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatAddPartitionDesc.java b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatAddPartitionDesc.java
index f705098..0de20b8 100644
--- a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatAddPartitionDesc.java
+++ b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatAddPartitionDesc.java
@@ -42,14 +42,14 @@
     private String tableName;
     private String dbName;
     private String location;
-    private Map<String,String> partSpec;
+    private Map<String, String> partSpec;
 
-     private HCatAddPartitionDesc(String dbName, String tbl, String loc, Map<String,String> spec){
-         this.dbName = dbName;
-         this.tableName = tbl;
-         this.location = loc;
-         this.partSpec = spec;
-     }
+    private HCatAddPartitionDesc(String dbName, String tbl, String loc, Map<String, String> spec) {
+        this.dbName = dbName;
+        this.tableName = tbl;
+        this.location = loc;
+        this.partSpec = spec;
+    }
 
     /**
      * Gets the location.
@@ -88,97 +88,97 @@
         return this.dbName;
     }
 
-     @Override
+    @Override
     public String toString() {
         return "HCatAddPartitionDesc ["
-                + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null")
-                + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
-                + (location != null ? "location=" + location + ", " : "location=null")
-                + (partSpec != null ? "partSpec=" + partSpec : "partSpec=null") + "]";
+            + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null")
+            + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
+            + (location != null ? "location=" + location + ", " : "location=null")
+            + (partSpec != null ? "partSpec=" + partSpec : "partSpec=null") + "]";
     }
 
     /**
-      * Creates the builder for specifying attributes.
-      *
-      * @param dbName the db name
-      * @param tableName the table name
-      * @param location the location
-      * @param partSpec the part spec
-      * @return the builder
-      * @throws HCatException
-      */
-     public static Builder create(String dbName, String tableName, String location,
-             Map<String,String> partSpec) throws HCatException {
-         return new Builder(dbName, tableName, location, partSpec);
-     }
+     * Creates the builder for specifying attributes.
+     *
+     * @param dbName the db name
+     * @param tableName the table name
+     * @param location the location
+     * @param partSpec the part spec
+     * @return the builder
+     * @throws HCatException
+     */
+    public static Builder create(String dbName, String tableName, String location,
+                                 Map<String, String> partSpec) throws HCatException {
+        return new Builder(dbName, tableName, location, partSpec);
+    }
 
-     Partition toHivePartition(Table hiveTable) throws HCatException{
-         Partition hivePtn = new Partition();
-         hivePtn.setDbName(this.dbName);
-         hivePtn.setTableName(this.tableName);
+    Partition toHivePartition(Table hiveTable) throws HCatException {
+        Partition hivePtn = new Partition();
+        hivePtn.setDbName(this.dbName);
+        hivePtn.setTableName(this.tableName);
 
-         List<String> pvals = new ArrayList<String>();
-         for (FieldSchema field : hiveTable.getPartitionKeys()) {
-           String val = partSpec.get(field.getName());
-           if (val == null || val.length() == 0) {
-               throw new HCatException("create partition: Value for key "
-                   + field.getName() + " is null or empty");
-             }
-           pvals.add(val);
-         }
+        List<String> pvals = new ArrayList<String>();
+        for (FieldSchema field : hiveTable.getPartitionKeys()) {
+            String val = partSpec.get(field.getName());
+            if (val == null || val.length() == 0) {
+                throw new HCatException("create partition: Value for key "
+                    + field.getName() + " is null or empty");
+            }
+            pvals.add(val);
+        }
 
-         hivePtn.setValues(pvals);
-         StorageDescriptor sd = new StorageDescriptor(hiveTable.getSd());
-         hivePtn.setSd(sd);
-         hivePtn.setParameters(hiveTable.getParameters());
-         if (this.location != null) {
-             hivePtn.getSd().setLocation(this.location);
-         } else {
-             String partName;
+        hivePtn.setValues(pvals);
+        StorageDescriptor sd = new StorageDescriptor(hiveTable.getSd());
+        hivePtn.setSd(sd);
+        hivePtn.setParameters(hiveTable.getParameters());
+        if (this.location != null) {
+            hivePtn.getSd().setLocation(this.location);
+        } else {
+            String partName;
             try {
                 partName = Warehouse.makePartName(
-                         hiveTable.getPartitionKeys(), pvals);
+                    hiveTable.getPartitionKeys(), pvals);
                 LOG.info("Setting partition location to :" + partName);
             } catch (MetaException e) {
                 throw new HCatException("Exception while creating partition name.", e);
             }
-             Path partPath = new Path(hiveTable.getSd().getLocation(), partName);
-             hivePtn.getSd().setLocation(partPath.toString());
-         }
-         hivePtn.setCreateTime((int) (System.currentTimeMillis() / 1000));
-         hivePtn.setLastAccessTimeIsSet(false);
-         return hivePtn;
-     }
+            Path partPath = new Path(hiveTable.getSd().getLocation(), partName);
+            hivePtn.getSd().setLocation(partPath.toString());
+        }
+        hivePtn.setCreateTime((int) (System.currentTimeMillis() / 1000));
+        hivePtn.setLastAccessTimeIsSet(false);
+        return hivePtn;
+    }
 
-     public static class Builder {
+    public static class Builder {
 
-         private String tableName;
-         private String location;
-         private Map<String,String> values;
-         private String dbName;
+        private String tableName;
+        private String location;
+        private Map<String, String> values;
+        private String dbName;
 
-         private Builder(String dbName, String tableName, String location, Map<String,String> values){
-             this.dbName = dbName;
-             this.tableName = tableName;
-             this.location = location;
-             this.values = values;
-         }
+        private Builder(String dbName, String tableName, String location, Map<String, String> values) {
+            this.dbName = dbName;
+            this.tableName = tableName;
+            this.location = location;
+            this.values = values;
+        }
 
-         /**
-          * Builds the HCatAddPartitionDesc.
-          *
-          * @return the h cat add partition desc
-          * @throws HCatException
-          */
-         public HCatAddPartitionDesc build() throws HCatException {
-             if(this.dbName == null){
-                 this.dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
-             }
-             HCatAddPartitionDesc desc = new HCatAddPartitionDesc(
-                     this.dbName, this.tableName, this.location,
-                     this.values);
-             return desc;
-         }
-     }
+        /**
+         * Builds the HCatAddPartitionDesc.
+         *
+         * @return the h cat add partition desc
+         * @throws HCatException
+         */
+        public HCatAddPartitionDesc build() throws HCatException {
+            if (this.dbName == null) {
+                this.dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+            }
+            HCatAddPartitionDesc desc = new HCatAddPartitionDesc(
+                this.dbName, this.tableName, this.location,
+                this.values);
+            return desc;
+        }
+    }
 
 }
diff --git a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClient.java b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClient.java
index 4d20e30..d966b8b 100644
--- a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClient.java
+++ b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClient.java
@@ -30,8 +30,10 @@
  */
 public abstract class HCatClient {
 
-    public enum DROP_DB_MODE { RESTRICT, CASCADE };
+    public enum DROP_DB_MODE {RESTRICT, CASCADE}
+
     public static final String HCAT_CLIENT_IMPL_CLASS = "hcat.client.impl.class";
+
     /**
      * Creates an instance of HCatClient.
      *
@@ -40,32 +42,32 @@
      * @throws HCatException,ConnectionFailureException
      */
     public static HCatClient create(Configuration conf) throws HCatException,
-            ConnectionFailureException {
+        ConnectionFailureException {
         HCatClient client = null;
         String className = conf.get(HCAT_CLIENT_IMPL_CLASS,
-                HCatClientHMSImpl.class.getName());
+            HCatClientHMSImpl.class.getName());
         try {
             Class<? extends HCatClient> clientClass = Class.forName(className,
-                    true, JavaUtils.getClassLoader()).asSubclass(
+                true, JavaUtils.getClassLoader()).asSubclass(
                     HCatClient.class);
             client = (HCatClient) clientClass.newInstance();
         } catch (ClassNotFoundException e) {
             throw new HCatException(
-                    "ClassNotFoundException while creating client class.", e);
+                "ClassNotFoundException while creating client class.", e);
         } catch (InstantiationException e) {
             throw new HCatException(
-                    "InstantiationException while creating client class.", e);
+                "InstantiationException while creating client class.", e);
         } catch (IllegalAccessException e) {
             throw new HCatException(
-                    "IllegalAccessException while creating client class.", e);
+                "IllegalAccessException while creating client class.", e);
         }
-        if(client != null){
+        if (client != null) {
             client.initialize(conf);
         }
         return client;
     }
 
-    abstract void initialize(Configuration conf) throws HCatException,ConnectionFailureException;
+    abstract void initialize(Configuration conf) throws HCatException, ConnectionFailureException;
 
     /**
      * Get all existing databases that match the given
@@ -76,7 +78,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract List<String> listDatabaseNamesByPattern(String pattern)
-            throws HCatException, ConnectionFailureException;
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Gets the database.
@@ -85,7 +87,7 @@
      * @return An instance of HCatDatabaseInfo.
      * @throws HCatException,ConnectionFailureException
      */
-    public abstract HCatDatabase getDatabase(String dbName) throws HCatException,ConnectionFailureException;
+    public abstract HCatDatabase getDatabase(String dbName) throws HCatException, ConnectionFailureException;
 
     /**
      * Creates the database.
@@ -94,7 +96,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract void createDatabase(HCatCreateDBDesc dbInfo)
-            throws HCatException,ConnectionFailureException;
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Drops a database.
@@ -108,7 +110,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract void dropDatabase(String dbName, boolean ifExists,
-            DROP_DB_MODE mode) throws HCatException, ConnectionFailureException;
+                                      DROP_DB_MODE mode) throws HCatException, ConnectionFailureException;
 
     /**
      * Returns all existing tables from the specified database which match the given
@@ -119,7 +121,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract List<String> listTableNamesByPattern(String dbName, String tablePattern)
-            throws HCatException,ConnectionFailureException;
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Gets the table.
@@ -130,7 +132,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract HCatTable getTable(String dbName, String tableName)
-            throws HCatException,ConnectionFailureException;
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Creates the table.
@@ -139,7 +141,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract void createTable(HCatCreateTableDesc createTableDesc)
-            throws HCatException,ConnectionFailureException;
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Creates the table like an existing table.
@@ -154,8 +156,8 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract void createTableLike(String dbName, String existingTblName,
-            String newTableName, boolean ifNotExists, boolean isExternal,
-            String location) throws HCatException,ConnectionFailureException;
+                                         String newTableName, boolean ifNotExists, boolean isExternal,
+                                         String location) throws HCatException, ConnectionFailureException;
 
     /**
      * Drop table.
@@ -167,7 +169,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract void dropTable(String dbName, String tableName,
-            boolean ifExists) throws HCatException,ConnectionFailureException;
+                                   boolean ifExists) throws HCatException, ConnectionFailureException;
 
     /**
      * Renames a table.
@@ -178,7 +180,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract void renameTable(String dbName, String oldName,
-            String newName) throws HCatException, ConnectionFailureException;
+                                     String newName) throws HCatException, ConnectionFailureException;
 
     /**
      * Gets all the partitions.
@@ -189,7 +191,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract List<HCatPartition> getPartitions(String dbName, String tblName)
-            throws HCatException,ConnectionFailureException;
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Gets the partition.
@@ -201,7 +203,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract HCatPartition getPartition(String dbName, String tableName,
-            Map<String,String> partitionSpec) throws HCatException,ConnectionFailureException;
+                                               Map<String, String> partitionSpec) throws HCatException, ConnectionFailureException;
 
     /**
      * Adds the partition.
@@ -210,7 +212,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract void addPartition(HCatAddPartitionDesc partInfo)
-            throws HCatException, ConnectionFailureException;
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Adds a list of partitions.
@@ -220,7 +222,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract int addPartitions(List<HCatAddPartitionDesc> partInfoList)
-            throws HCatException, ConnectionFailureException;
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Drops partition.
@@ -232,8 +234,8 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract void dropPartition(String dbName, String tableName,
-            Map<String, String> partitionSpec, boolean ifExists)
-            throws HCatException, ConnectionFailureException;
+                                       Map<String, String> partitionSpec, boolean ifExists)
+        throws HCatException, ConnectionFailureException;
 
     /**
      * List partitions by filter.
@@ -247,7 +249,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract List<HCatPartition> listPartitionsByFilter(String dbName, String tblName,
-            String filter) throws HCatException,ConnectionFailureException;
+                                                               String filter) throws HCatException, ConnectionFailureException;
 
     /**
      * Mark partition for event.
@@ -259,8 +261,8 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract void markPartitionForEvent(String dbName, String tblName,
-            Map<String, String> partKVs, PartitionEventType eventType)
-            throws HCatException,ConnectionFailureException;
+                                               Map<String, String> partKVs, PartitionEventType eventType)
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Checks if a partition is marked for event.
@@ -273,8 +275,8 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract boolean isPartitionMarkedForEvent(String dbName, String tblName,
-            Map<String, String> partKVs, PartitionEventType eventType)
-            throws HCatException,ConnectionFailureException;
+                                                      Map<String, String> partKVs, PartitionEventType eventType)
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Gets the delegation token.
@@ -285,8 +287,8 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract String getDelegationToken(String owner,
-            String renewerKerberosPrincipalName) throws HCatException,
-            ConnectionFailureException;
+                                              String renewerKerberosPrincipalName) throws HCatException,
+        ConnectionFailureException;
 
     /**
      * Renew delegation token.
@@ -296,7 +298,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract long renewDelegationToken(String tokenStrForm)
-            throws HCatException, ConnectionFailureException;
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Cancel delegation token.
@@ -305,7 +307,7 @@
      * @throws HCatException,ConnectionFailureException
      */
     public abstract void cancelDelegationToken(String tokenStrForm)
-            throws HCatException, ConnectionFailureException;
+        throws HCatException, ConnectionFailureException;
 
     /**
      * Close the hcatalog client.
diff --git a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClientHMSImpl.java b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClientHMSImpl.java
index 5e97900..71ac426 100644
--- a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClientHMSImpl.java
+++ b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClientHMSImpl.java
@@ -53,12 +53,12 @@
 public class HCatClientHMSImpl extends HCatClient {
 
     private HiveMetaStoreClient hmsClient;
-    private Configuration  config;
+    private Configuration config;
     private HiveConf hiveConfig;
 
     @Override
     public List<String> listDatabaseNamesByPattern(String pattern)
-            throws HCatException, ConnectionFailureException {
+        throws HCatException, ConnectionFailureException {
         List<String> dbNames = null;
         try {
             dbNames = hmsClient.getDatabases(pattern);
@@ -70,7 +70,7 @@
 
     @Override
     public HCatDatabase getDatabase(String dbName) throws HCatException,
-            ConnectionFailureException {
+        ConnectionFailureException {
         HCatDatabase db = null;
         try {
             Database hiveDB = hmsClient.getDatabase(checkDB(dbName));
@@ -79,42 +79,42 @@
             }
         } catch (NoSuchObjectException exp) {
             throw new HCatException(
-                    "NoSuchObjectException while fetching database", exp);
+                "NoSuchObjectException while fetching database", exp);
         } catch (MetaException exp) {
             throw new HCatException("MetaException while fetching database",
-                    exp);
+                exp);
         } catch (TException exp) {
             throw new ConnectionFailureException(
-                    "TException while fetching database", exp);
+                "TException while fetching database", exp);
         }
         return db;
     }
 
     @Override
     public void createDatabase(HCatCreateDBDesc dbInfo) throws HCatException,
-            ConnectionFailureException {
+        ConnectionFailureException {
         try {
             hmsClient.createDatabase(dbInfo.toHiveDb());
         } catch (AlreadyExistsException exp) {
             if (!dbInfo.getIfNotExists()) {
                 throw new HCatException(
-                        "AlreadyExistsException while creating database", exp);
+                    "AlreadyExistsException while creating database", exp);
             }
         } catch (InvalidObjectException exp) {
             throw new HCatException(
-                    "InvalidObjectException while creating database", exp);
+                "InvalidObjectException while creating database", exp);
         } catch (MetaException exp) {
             throw new HCatException("MetaException while creating database",
-                    exp);
+                exp);
         } catch (TException exp) {
             throw new ConnectionFailureException(
-                    "TException while creating database", exp);
+                "TException while creating database", exp);
         }
     }
 
     @Override
     public void dropDatabase(String dbName, boolean ifExists, DROP_DB_MODE mode)
-            throws HCatException, ConnectionFailureException {
+        throws HCatException, ConnectionFailureException {
         boolean isCascade;
         if (mode.toString().equalsIgnoreCase("cascade")) {
             isCascade = true;
@@ -126,35 +126,35 @@
         } catch (NoSuchObjectException e) {
             if (!ifExists) {
                 throw new HCatException(
-                        "NoSuchObjectException while dropping db.", e);
+                    "NoSuchObjectException while dropping db.", e);
             }
         } catch (InvalidOperationException e) {
             throw new HCatException(
-                    "InvalidOperationException while dropping db.", e);
+                "InvalidOperationException while dropping db.", e);
         } catch (MetaException e) {
             throw new HCatException("MetaException while dropping db.", e);
         } catch (TException e) {
             throw new ConnectionFailureException("TException while dropping db.",
-                    e);
+                e);
         }
     }
 
     @Override
     public List<String> listTableNamesByPattern(String dbName,
-            String tablePattern) throws HCatException, ConnectionFailureException {
+                                                String tablePattern) throws HCatException, ConnectionFailureException {
         List<String> tableNames = null;
         try {
             tableNames = hmsClient.getTables(checkDB(dbName), tablePattern);
         } catch (MetaException e) {
             throw new HCatException(
-                    "MetaException while fetching table names.", e);
+                "MetaException while fetching table names.", e);
         }
         return tableNames;
     }
 
     @Override
     public HCatTable getTable(String dbName, String tableName)
-            throws HCatException, ConnectionFailureException {
+        throws HCatException, ConnectionFailureException {
         HCatTable table = null;
         try {
             Table hiveTable = hmsClient.getTable(checkDB(dbName), tableName);
@@ -165,35 +165,35 @@
             throw new HCatException("MetaException while fetching table.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while fetching table.", e);
+                "TException while fetching table.", e);
         } catch (NoSuchObjectException e) {
             throw new HCatException(
-                    "NoSuchObjectException while fetching table.", e);
+                "NoSuchObjectException while fetching table.", e);
         }
         return table;
     }
 
     @Override
     public void createTable(HCatCreateTableDesc createTableDesc)
-            throws HCatException, ConnectionFailureException {
+        throws HCatException, ConnectionFailureException {
         try {
             hmsClient.createTable(createTableDesc.toHiveTable(hiveConfig));
         } catch (AlreadyExistsException e) {
             if (createTableDesc.getIfNotExists() == false) {
                 throw new HCatException(
-                        "AlreadyExistsException while creating table.", e);
+                    "AlreadyExistsException while creating table.", e);
             }
         } catch (InvalidObjectException e) {
             throw new HCatException(
-                    "InvalidObjectException while creating table.", e);
+                "InvalidObjectException while creating table.", e);
         } catch (MetaException e) {
             throw new HCatException("MetaException while creating table.", e);
         } catch (NoSuchObjectException e) {
             throw new HCatException(
-                    "NoSuchObjectException while creating table.", e);
+                "NoSuchObjectException while creating table.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while creating table.", e);
+                "TException while creating table.", e);
         } catch (IOException e) {
             throw new HCatException("IOException while creating hive conf.", e);
         }
@@ -202,69 +202,69 @@
 
     @Override
     public void createTableLike(String dbName, String existingTblName,
-            String newTableName, boolean ifNotExists, boolean isExternal,
-            String location) throws HCatException, ConnectionFailureException {
+                                String newTableName, boolean ifNotExists, boolean isExternal,
+                                String location) throws HCatException, ConnectionFailureException {
 
         Table hiveTable = getHiveTableLike(checkDB(dbName), existingTblName,
-                newTableName, ifNotExists, location);
+            newTableName, ifNotExists, location);
         if (hiveTable != null) {
             try {
                 hmsClient.createTable(hiveTable);
             } catch (AlreadyExistsException e) {
                 if (!ifNotExists) {
                     throw new HCatException(
-                            "A table already exists with the name "
-                                    + newTableName, e);
+                        "A table already exists with the name "
+                            + newTableName, e);
                 }
             } catch (InvalidObjectException e) {
                 throw new HCatException(
-                        "InvalidObjectException in create table like command.",
-                        e);
+                    "InvalidObjectException in create table like command.",
+                    e);
             } catch (MetaException e) {
                 throw new HCatException(
-                        "MetaException in create table like command.", e);
+                    "MetaException in create table like command.", e);
             } catch (NoSuchObjectException e) {
                 throw new HCatException(
-                        "NoSuchObjectException in create table like command.",
-                        e);
+                    "NoSuchObjectException in create table like command.",
+                    e);
             } catch (TException e) {
                 throw new ConnectionFailureException(
-                        "TException in create table like command.", e);
+                    "TException in create table like command.", e);
             }
         }
     }
 
     @Override
     public void dropTable(String dbName, String tableName, boolean ifExists)
-            throws HCatException, ConnectionFailureException {
+        throws HCatException, ConnectionFailureException {
         try {
-            hmsClient.dropTable(checkDB(dbName), tableName,true, ifExists);
+            hmsClient.dropTable(checkDB(dbName), tableName, true, ifExists);
         } catch (NoSuchObjectException e) {
             if (!ifExists) {
                 throw new HCatException(
-                        "NoSuchObjectException while dropping table.", e);
+                    "NoSuchObjectException while dropping table.", e);
             }
         } catch (MetaException e) {
             throw new HCatException("MetaException while dropping table.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while dropping table.", e);
+                "TException while dropping table.", e);
         }
     }
 
     @Override
     public void renameTable(String dbName, String oldName, String newName)
-            throws HCatException, ConnectionFailureException {
+        throws HCatException, ConnectionFailureException {
         Table tbl;
         try {
             Table oldtbl = hmsClient.getTable(checkDB(dbName), oldName);
             if (oldtbl != null) {
                 // TODO : Should be moved out.
                 if (oldtbl
-                        .getParameters()
-                        .get(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE) != null) {
+                    .getParameters()
+                    .get(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE) != null) {
                     throw new HCatException(
-                            "Cannot use rename command on a non-native table");
+                        "Cannot use rename command on a non-native table");
                 }
                 tbl = new Table(oldtbl);
                 tbl.setTableName(newName);
@@ -274,229 +274,229 @@
             throw new HCatException("MetaException while renaming table", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while renaming table", e);
+                "TException while renaming table", e);
         } catch (NoSuchObjectException e) {
             throw new HCatException(
-                    "NoSuchObjectException while renaming table", e);
+                "NoSuchObjectException while renaming table", e);
         } catch (InvalidOperationException e) {
             throw new HCatException(
-                    "InvalidOperationException while renaming table", e);
+                "InvalidOperationException while renaming table", e);
         }
     }
 
     @Override
     public List<HCatPartition> getPartitions(String dbName, String tblName)
-            throws HCatException, ConnectionFailureException {
+        throws HCatException, ConnectionFailureException {
         List<HCatPartition> hcatPtns = new ArrayList<HCatPartition>();
         try {
             List<Partition> hivePtns = hmsClient.listPartitions(
-                    checkDB(dbName), tblName, (short) -1);
+                checkDB(dbName), tblName, (short) -1);
             for (Partition ptn : hivePtns) {
                 hcatPtns.add(new HCatPartition(ptn));
             }
         } catch (NoSuchObjectException e) {
             throw new HCatException(
-                    "NoSuchObjectException while retrieving partition.", e);
+                "NoSuchObjectException while retrieving partition.", e);
         } catch (MetaException e) {
             throw new HCatException(
-                    "MetaException while retrieving partition.", e);
+                "MetaException while retrieving partition.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while retrieving partition.", e);
+                "TException while retrieving partition.", e);
         }
         return hcatPtns;
     }
 
     @Override
     public HCatPartition getPartition(String dbName, String tableName,
-            Map<String, String> partitionSpec) throws HCatException,
-            ConnectionFailureException {
+                                      Map<String, String> partitionSpec) throws HCatException,
+        ConnectionFailureException {
         HCatPartition partition = null;
         try {
             ArrayList<String> ptnValues = new ArrayList<String>();
             ptnValues.addAll(partitionSpec.values());
             Partition hivePartition = hmsClient.getPartition(checkDB(dbName),
-                    tableName, ptnValues);
+                tableName, ptnValues);
             if (hivePartition != null) {
                 partition = new HCatPartition(hivePartition);
             }
         } catch (MetaException e) {
             throw new HCatException(
-                    "MetaException while retrieving partition.", e);
+                "MetaException while retrieving partition.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while retrieving partition.", e);
+                "TException while retrieving partition.", e);
         } catch (NoSuchObjectException e) {
             throw new HCatException(
-                    "NoSuchObjectException while retrieving partition.", e);
+                "NoSuchObjectException while retrieving partition.", e);
         }
         return partition;
     }
 
     @Override
     public void addPartition(HCatAddPartitionDesc partInfo)
-            throws HCatException, ConnectionFailureException {
+        throws HCatException, ConnectionFailureException {
         Table tbl = null;
         try {
             tbl = hmsClient.getTable(partInfo.getDatabaseName(),
-                    partInfo.getTableName());
+                partInfo.getTableName());
             // TODO: Should be moved out.
             if (tbl.getPartitionKeysSize() == 0) {
                 throw new HCatException("The table " + partInfo.getTableName()
-                        + " is not partitioned.");
+                    + " is not partitioned.");
             }
 
             hmsClient.add_partition(partInfo.toHivePartition(tbl));
         } catch (InvalidObjectException e) {
             throw new HCatException(
-                    "InvalidObjectException while adding partition.", e);
+                "InvalidObjectException while adding partition.", e);
         } catch (AlreadyExistsException e) {
             throw new HCatException(
-                    "AlreadyExistsException while adding partition.", e);
+                "AlreadyExistsException while adding partition.", e);
         } catch (MetaException e) {
             throw new HCatException("MetaException while adding partition.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while adding partition.", e);
+                "TException while adding partition.", e);
         } catch (NoSuchObjectException e) {
             throw new HCatException("The table " + partInfo.getTableName()
-                    + " is could not be found.", e);
+                + " is could not be found.", e);
         }
     }
 
     @Override
     public void dropPartition(String dbName, String tableName,
-            Map<String, String> partitionSpec, boolean ifExists)
-            throws HCatException, ConnectionFailureException {
+                              Map<String, String> partitionSpec, boolean ifExists)
+        throws HCatException, ConnectionFailureException {
         try {
             List<String> ptnValues = new ArrayList<String>();
             ptnValues.addAll(partitionSpec.values());
             hmsClient.dropPartition(checkDB(dbName), tableName, ptnValues,
-                    ifExists);
+                ifExists);
         } catch (NoSuchObjectException e) {
             if (!ifExists) {
                 throw new HCatException(
-                        "NoSuchObjectException while dropping partition.", e);
+                    "NoSuchObjectException while dropping partition.", e);
             }
         } catch (MetaException e) {
             throw new HCatException("MetaException while dropping partition.",
-                    e);
+                e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while dropping partition.", e);
+                "TException while dropping partition.", e);
         }
     }
 
     @Override
     public List<HCatPartition> listPartitionsByFilter(String dbName,
-            String tblName, String filter) throws HCatException,
-            ConnectionFailureException {
+                                                      String tblName, String filter) throws HCatException,
+        ConnectionFailureException {
         List<HCatPartition> hcatPtns = new ArrayList<HCatPartition>();
         try {
             List<Partition> hivePtns = hmsClient.listPartitionsByFilter(
-                    checkDB(dbName), tblName, filter, (short) -1);
+                checkDB(dbName), tblName, filter, (short) -1);
             for (Partition ptn : hivePtns) {
                 hcatPtns.add(new HCatPartition(ptn));
             }
         } catch (MetaException e) {
             throw new HCatException("MetaException while fetching partitions.",
-                    e);
+                e);
         } catch (NoSuchObjectException e) {
             throw new HCatException(
-                    "NoSuchObjectException while fetching partitions.", e);
+                "NoSuchObjectException while fetching partitions.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while fetching partitions.", e);
+                "TException while fetching partitions.", e);
         }
         return hcatPtns;
     }
 
     @Override
     public void markPartitionForEvent(String dbName, String tblName,
-            Map<String, String> partKVs, PartitionEventType eventType)
-            throws HCatException, ConnectionFailureException {
+                                      Map<String, String> partKVs, PartitionEventType eventType)
+        throws HCatException, ConnectionFailureException {
         try {
             hmsClient.markPartitionForEvent(checkDB(dbName), tblName, partKVs,
-                    eventType);
+                eventType);
         } catch (MetaException e) {
             throw new HCatException(
-                    "MetaException while marking partition for event.", e);
+                "MetaException while marking partition for event.", e);
         } catch (NoSuchObjectException e) {
             throw new HCatException(
-                    "NoSuchObjectException while marking partition for event.",
-                    e);
+                "NoSuchObjectException while marking partition for event.",
+                e);
         } catch (UnknownTableException e) {
             throw new HCatException(
-                    "UnknownTableException while marking partition for event.",
-                    e);
+                "UnknownTableException while marking partition for event.",
+                e);
         } catch (UnknownDBException e) {
             throw new HCatException(
-                    "UnknownDBException while marking partition for event.", e);
+                "UnknownDBException while marking partition for event.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while marking partition for event.", e);
+                "TException while marking partition for event.", e);
         } catch (InvalidPartitionException e) {
             throw new HCatException(
-                    "InvalidPartitionException while marking partition for event.",
-                    e);
+                "InvalidPartitionException while marking partition for event.",
+                e);
         } catch (UnknownPartitionException e) {
             throw new HCatException(
-                    "UnknownPartitionException while marking partition for event.",
-                    e);
+                "UnknownPartitionException while marking partition for event.",
+                e);
         }
     }
 
     @Override
     public boolean isPartitionMarkedForEvent(String dbName, String tblName,
-            Map<String, String> partKVs, PartitionEventType eventType)
-            throws HCatException, ConnectionFailureException {
+                                             Map<String, String> partKVs, PartitionEventType eventType)
+        throws HCatException, ConnectionFailureException {
         boolean isMarked = false;
         try {
             isMarked = hmsClient.isPartitionMarkedForEvent(checkDB(dbName),
-                    tblName, partKVs, eventType);
+                tblName, partKVs, eventType);
         } catch (MetaException e) {
             throw new HCatException(
-                    "MetaException while checking partition for event.", e);
+                "MetaException while checking partition for event.", e);
         } catch (NoSuchObjectException e) {
             throw new HCatException(
-                    "NoSuchObjectException while checking partition for event.",
-                    e);
+                "NoSuchObjectException while checking partition for event.",
+                e);
         } catch (UnknownTableException e) {
             throw new HCatException(
-                    "UnknownTableException while checking partition for event.",
-                    e);
+                "UnknownTableException while checking partition for event.",
+                e);
         } catch (UnknownDBException e) {
             throw new HCatException(
-                    "UnknownDBException while checking partition for event.", e);
+                "UnknownDBException while checking partition for event.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while checking partition for event.", e);
+                "TException while checking partition for event.", e);
         } catch (InvalidPartitionException e) {
             throw new HCatException(
-                    "InvalidPartitionException while checking partition for event.",
-                    e);
+                "InvalidPartitionException while checking partition for event.",
+                e);
         } catch (UnknownPartitionException e) {
             throw new HCatException(
-                    "UnknownPartitionException while checking partition for event.",
-                    e);
+                "UnknownPartitionException while checking partition for event.",
+                e);
         }
         return isMarked;
     }
 
     @Override
     public String getDelegationToken(String owner,
-            String renewerKerberosPrincipalName) throws HCatException,
-            ConnectionFailureException {
+                                     String renewerKerberosPrincipalName) throws HCatException,
+        ConnectionFailureException {
         String token = null;
         try {
             token = hmsClient.getDelegationToken(owner,
-                    renewerKerberosPrincipalName);
+                renewerKerberosPrincipalName);
         } catch (MetaException e) {
             throw new HCatException(
-                    "MetaException while getting delegation token.", e);
+                "MetaException while getting delegation token.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while getting delegation token.", e);
+                "TException while getting delegation token.", e);
         }
 
         return token;
@@ -504,16 +504,16 @@
 
     @Override
     public long renewDelegationToken(String tokenStrForm) throws HCatException,
-            ConnectionFailureException {
+        ConnectionFailureException {
         long time = 0;
         try {
             time = hmsClient.renewDelegationToken(tokenStrForm);
         } catch (MetaException e) {
             throw new HCatException(
-                    "MetaException while renewing delegation token.", e);
+                "MetaException while renewing delegation token.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while renewing delegation token.", e);
+                "TException while renewing delegation token.", e);
         }
 
         return time;
@@ -521,15 +521,15 @@
 
     @Override
     public void cancelDelegationToken(String tokenStrForm)
-            throws HCatException, ConnectionFailureException {
+        throws HCatException, ConnectionFailureException {
         try {
             hmsClient.cancelDelegationToken(tokenStrForm);
         } catch (MetaException e) {
             throw new HCatException(
-                    "MetaException while canceling delegation token.", e);
+                "MetaException while canceling delegation token.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while canceling delegation token.", e);
+                "TException while canceling delegation token.", e);
         }
     }
 
@@ -542,38 +542,38 @@
      */
     @Override
     void initialize(Configuration conf) throws HCatException,
-            ConnectionFailureException {
+        ConnectionFailureException {
         this.config = conf;
         try {
             hiveConfig = HCatUtil.getHiveConf(config);
             hmsClient = HCatUtil.getHiveClient(hiveConfig);
         } catch (MetaException exp) {
             throw new HCatException("MetaException while creating HMS client",
-                    exp);
+                exp);
         } catch (IOException exp) {
             throw new HCatException("IOException while creating HMS client",
-                    exp);
+                exp);
         }
 
     }
 
     private Table getHiveTableLike(String dbName, String existingTblName,
-            String newTableName, boolean isExternal, String location)
-            throws HCatException, ConnectionFailureException {
+                                   String newTableName, boolean isExternal, String location)
+        throws HCatException, ConnectionFailureException {
         Table oldtbl = null;
         Table newTable = null;
         try {
             oldtbl = hmsClient.getTable(checkDB(dbName), existingTblName);
         } catch (MetaException e1) {
             throw new HCatException(
-                    "MetaException while retrieving existing table.", e1);
+                "MetaException while retrieving existing table.", e1);
         } catch (TException e1) {
             throw new ConnectionFailureException(
-                    "TException while retrieving existing table.", e1);
+                "TException while retrieving existing table.", e1);
         } catch (NoSuchObjectException e1) {
             throw new HCatException(
-                    "NoSuchObjectException while retrieving existing table.",
-                    e1);
+                "NoSuchObjectException while retrieving existing table.",
+                e1);
         }
         if (oldtbl != null) {
             newTable = new Table();
@@ -626,7 +626,7 @@
      */
     @Override
     public int addPartitions(List<HCatAddPartitionDesc> partInfoList)
-            throws HCatException, ConnectionFailureException {
+        throws HCatException, ConnectionFailureException {
         int numPartitions = -1;
         if ((partInfoList == null) || (partInfoList.size() == 0)) {
             throw new HCatException("The partition list is null or empty.");
@@ -635,7 +635,7 @@
         Table tbl = null;
         try {
             tbl = hmsClient.getTable(partInfoList.get(0).getDatabaseName(),
-                    partInfoList.get(0).getTableName());
+                partInfoList.get(0).getTableName());
             ArrayList<Partition> ptnList = new ArrayList<Partition>();
             for (HCatAddPartitionDesc desc : partInfoList) {
                 ptnList.add(desc.toHivePartition(tbl));
@@ -643,19 +643,19 @@
             numPartitions = hmsClient.add_partitions(ptnList);
         } catch (InvalidObjectException e) {
             throw new HCatException(
-                    "InvalidObjectException while adding partition.", e);
+                "InvalidObjectException while adding partition.", e);
         } catch (AlreadyExistsException e) {
             throw new HCatException(
-                    "AlreadyExistsException while adding partition.", e);
+                "AlreadyExistsException while adding partition.", e);
         } catch (MetaException e) {
             throw new HCatException("MetaException while adding partition.", e);
         } catch (TException e) {
             throw new ConnectionFailureException(
-                    "TException while adding partition.", e);
+                "TException while adding partition.", e);
         } catch (NoSuchObjectException e) {
             throw new HCatException("The table "
-                    + partInfoList.get(0).getTableName()
-                    + " is could not be found.", e);
+                + partInfoList.get(0).getTableName()
+                + " is could not be found.", e);
         }
         return numPartitions;
     }
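
For orientation, the partition methods reindented above (getPartition, addPartition, dropPartition) all take a Map<String, String> partition spec whose values are forwarded to the metastore client. The sketch below is illustrative only: the HCatClient.create(Configuration) factory, the "default"/"web_logs" names, and the "dt" partition key are assumptions for illustration.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hcatalog.api.HCatClient;
import org.apache.hcatalog.api.HCatPartition;

public class PartitionSketch {
    public static void main(String[] args) throws Exception {
        // Assumed factory; the Configuration must point at a running metastore.
        HCatClient client = HCatClient.create(new Configuration());

        // Keys and values are illustrative; the values are passed through in order,
        // mirroring the partitionSpec.values() handling in the methods above.
        Map<String, String> partitionSpec = new HashMap<String, String>();
        partitionSpec.put("dt", "2012-09-01");

        HCatPartition ptn = client.getPartition("default", "web_logs", partitionSpec);
        System.out.println(ptn);

        // ifExists=true mirrors the NoSuchObjectException handling shown above.
        client.dropPartition("default", "web_logs", partitionSpec, true);
    }
}
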
diff --git a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatCreateDBDesc.java b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatCreateDBDesc.java
index 22f093c..3f8928c 100644
--- a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatCreateDBDesc.java
+++ b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatCreateDBDesc.java
@@ -47,7 +47,7 @@
      *
      * @return the if not exists
      */
-    public boolean getIfNotExists(){
+    public boolean getIfNotExists() {
         return this.ifNotExits;
     }
 
@@ -78,19 +78,19 @@
         return this.dbName;
     }
 
-    private HCatCreateDBDesc(String dbName){
-       this.dbName = dbName;
+    private HCatCreateDBDesc(String dbName) {
+        this.dbName = dbName;
     }
 
     @Override
     public String toString() {
         return "HCatCreateDBDesc ["
-                + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
-                + (locationUri != null ? "location=" + locationUri + ", "
-                        : "location=null")
-                + (comment != null ? "comment=" + comment + ", " : "comment=null")
-                + (dbProperties != null ? "dbProperties=" + dbProperties + ", "
-                        : "dbProperties=null") + "ifNotExits=" + ifNotExits + "]";
+            + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
+            + (locationUri != null ? "location=" + locationUri + ", "
+            : "location=null")
+            + (comment != null ? "comment=" + comment + ", " : "comment=null")
+            + (dbProperties != null ? "dbProperties=" + dbProperties + ", "
+            : "dbProperties=null") + "ifNotExits=" + ifNotExits + "]";
     }
 
     /**
@@ -99,11 +99,11 @@
      * @param dbName the db name
      * @return the builder
      */
-    public static Builder create(String dbName){
+    public static Builder create(String dbName) {
         return new Builder(dbName);
     }
 
-    Database toHiveDb(){
+    Database toHiveDb() {
         Database hiveDB = new Database();
         hiveDB.setDescription(this.comment);
         hiveDB.setLocationUri(this.locationUri);
@@ -120,7 +120,7 @@
         private String dbName;
         private boolean ifNotExists = false;
 
-        private Builder(String dbName){
+        private Builder(String dbName) {
             this.dbName = dbName;
         }
 
@@ -130,7 +130,7 @@
          * @param value the location of the database.
          * @return the builder
          */
-        public Builder location(String value){
+        public Builder location(String value) {
             this.innerLoc = value;
             return this;
         }
@@ -141,7 +141,7 @@
          * @param value comments.
          * @return the builder
          */
-        public Builder comment(String value){
+        public Builder comment(String value) {
             this.innerComment = value;
             return this;
         }
@@ -152,7 +152,7 @@
          * database with the same name already exists.
          * @return the builder
          */
-        public Builder ifNotExists(boolean ifNotExists){
+        public Builder ifNotExists(boolean ifNotExists) {
             this.ifNotExists = ifNotExists;
             return this;
         }
@@ -176,7 +176,7 @@
          * @throws HCatException
          */
         public HCatCreateDBDesc build() throws HCatException {
-            if(this.dbName == null){
+            if (this.dbName == null) {
                 throw new HCatException("Database name cannot be null.");
             }
             HCatCreateDBDesc desc = new HCatCreateDBDesc(this.dbName);
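
As a usage note for the builder reformatted above, the following minimal sketch shows how a database descriptor is assembled and handed to a client. The database name, location, and comment are illustrative values, and the client parameter is assumed to be an already-initialized HCatClient; createDatabase is the same call exercised in TestHCatClient further down.

import org.apache.hcatalog.api.HCatClient;
import org.apache.hcatalog.api.HCatCreateDBDesc;

public class CreateDbSketch {
    static void createDb(HCatClient client) throws Exception {
        // Every setter used here appears in the Builder above; values are examples only.
        HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create("reports")
            .location("/user/hive/warehouse/reports.db")
            .comment("Illustrative database")
            .ifNotExists(true)
            .build();
        client.createDatabase(dbDesc);
    }
}
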
diff --git a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatCreateTableDesc.java b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatCreateTableDesc.java
index b2bfcb4..5a895a7 100644
--- a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatCreateTableDesc.java
+++ b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatCreateTableDesc.java
@@ -50,7 +50,7 @@
  * The Class HCatCreateTableDesc for defining attributes for a new table.
  */
 @SuppressWarnings("deprecation")
-public class HCatCreateTableDesc{
+public class HCatCreateTableDesc {
 
     private static final Logger LOG = LoggerFactory.getLogger(HCatCreateTableDesc.class);
 
@@ -72,7 +72,7 @@
     private String serde;
     private String storageHandler;
 
-    private HCatCreateTableDesc(String dbName, String tableName, List<HCatFieldSchema> columns){
+    private HCatCreateTableDesc(String dbName, String tableName, List<HCatFieldSchema> columns) {
         this.dbName = dbName;
         this.tableName = tableName;
         this.cols = columns;
@@ -86,11 +86,11 @@
      * @param columns the columns
      * @return the builder
      */
-    public static Builder create(String dbName, String tableName, List<HCatFieldSchema> columns){
+    public static Builder create(String dbName, String tableName, List<HCatFieldSchema> columns) {
         return new Builder(dbName, tableName, columns);
     }
 
-    Table toHiveTable(HiveConf conf) throws HCatException{
+    Table toHiveTable(HiveConf conf) throws HCatException {
 
         Table newTable = new Table();
         newTable.setDbName(dbName);
@@ -122,26 +122,26 @@
             } else {
                 LOG.info("Using LazySimpleSerDe for table " + tableName);
                 sd.getSerdeInfo()
-                        .setSerializationLib(
-                                org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class
-                                        .getName());
+                    .setSerializationLib(
+                        org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class
+                            .getName());
             }
         } else {
             try {
                 LOG.info("Creating instance of storage handler to get input/output, serder info.");
                 HiveStorageHandler sh = HiveUtils.getStorageHandler(conf,
-                        storageHandler);
+                    storageHandler);
                 sd.setInputFormat(sh.getInputFormatClass().getName());
                 sd.setOutputFormat(sh.getOutputFormatClass().getName());
                 sd.getSerdeInfo().setSerializationLib(
-                        sh.getSerDeClass().getName());
+                    sh.getSerDeClass().getName());
                 newTable.putToParameters(
-                        org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE,
-                        storageHandler);
+                    org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE,
+                    storageHandler);
             } catch (HiveException e) {
                 throw new HCatException(
-                        "Exception while creating instance of storage handler",
-                        e);
+                    "Exception while creating instance of storage handler",
+                    e);
             }
         }
         newTable.setSd(sd);
@@ -175,345 +175,345 @@
         return newTable;
     }
 
-      /**
-       * Gets the if not exists.
-       *
-       * @return the if not exists
-       */
-      public boolean getIfNotExists() {
-          return this.ifNotExists;
-      }
-
-     /**
-      * Gets the table name.
-      *
-      * @return the table name
-      */
-     public String getTableName() {
-          return this.tableName;
-      }
-
-      /**
-       * Gets the cols.
-       *
-       * @return the cols
-       */
-      public List<HCatFieldSchema> getCols() {
-         return this.cols;
-      }
-
-      /**
-       * Gets the partition cols.
-       *
-       * @return the partition cols
-       */
-      public List<HCatFieldSchema> getPartitionCols() {
-          return this.partCols;
-      }
-
-      /**
-       * Gets the bucket cols.
-       *
-       * @return the bucket cols
-       */
-      public List<String> getBucketCols() {
-          return this.bucketCols;
-      }
-
-      public int getNumBuckets() {
-          return this.numBuckets;
-      }
-
-      /**
-       * Gets the comments.
-       *
-       * @return the comments
-       */
-      public String getComments() {
-          return this.comment;
-      }
-
-      /**
-       * Gets the storage handler.
-       *
-       * @return the storage handler
-       */
-      public String getStorageHandler() {
-          return this.storageHandler;
-      }
-
-      /**
-       * Gets the location.
-       *
-       * @return the location
-       */
-      public String getLocation() {
-          return this.location;
-      }
-
-      /**
-       * Gets the external.
-       *
-       * @return the external
-       */
-      public boolean getExternal() {
-          return this.isExternal;
-      }
-
-      /**
-       * Gets the sort cols.
-       *
-       * @return the sort cols
-       */
-      public List<Order> getSortCols() {
-          return this.sortCols;
-      }
-
-      /**
-       * Gets the tbl props.
-       *
-       * @return the tbl props
-       */
-      public Map<String, String> getTblProps() {
-          return this.tblProps;
-      }
-
-      /**
-       * Gets the file format.
-       *
-       * @return the file format
-       */
-      public String getFileFormat(){
-          return this.fileFormat;
-      }
-
-      /**
-       * Gets the database name.
-       *
-       * @return the database name
-       */
-      public String getDatabaseName() {
-          return this.dbName;
-      }
-
-      @Override
-    public String toString() {
-        return "HCatCreateTableDesc ["
-                + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null")
-                + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
-                + "isExternal="
-                + isExternal
-                + ", "
-                + (comment != null ? "comment=" + comment + ", " : "comment=null")
-                + (location != null ? "location=" + location + ", " : "location=null")
-                + (cols != null ? "cols=" + cols + ", " : "cols=null")
-                + (partCols != null ? "partCols=" + partCols + ", " : "partCols=null")
-                + (bucketCols != null ? "bucketCols=" + bucketCols + ", " : "bucketCols=null")
-                + "numBuckets="
-                + numBuckets
-                + ", "
-                + (sortCols != null ? "sortCols=" + sortCols + ", " : "sortCols=null")
-                + (tblProps != null ? "tblProps=" + tblProps + ", " : "tblProps=null")
-                + "ifNotExists="
-                + ifNotExists
-                + ", "
-                + (fileFormat != null ? "fileFormat=" + fileFormat + ", " : "fileFormat=null")
-                + (inputformat != null ? "inputformat=" + inputformat + ", "
-                        : "inputformat=null")
-                + (outputformat != null ? "outputformat=" + outputformat + ", "
-                        : "outputformat=null")
-                + (serde != null ? "serde=" + serde + ", " : "serde=null")
-                + (storageHandler != null ? "storageHandler=" + storageHandler
-                        : "storageHandler=null") + "]";
+    /**
+     * Gets the if not exists.
+     *
+     * @return the if not exists
+     */
+    public boolean getIfNotExists() {
+        return this.ifNotExists;
     }
 
-    public static class Builder{
+    /**
+     * Gets the table name.
+     *
+     * @return the table name
+     */
+    public String getTableName() {
+        return this.tableName;
+    }
 
-          private String tableName;
-          private boolean isExternal;
-          private List<HCatFieldSchema> cols;
-          private List<HCatFieldSchema> partCols;
-          private List<String> bucketCols;
-          private List<Order> sortCols;
-          private int numBuckets;
-          private String comment;
-          private String fileFormat;
-          private String location;
-          private String storageHandler;
-          private Map<String, String> tblProps;
-          private boolean ifNotExists;
-          private String dbName;
+    /**
+     * Gets the cols.
+     *
+     * @return the cols
+     */
+    public List<HCatFieldSchema> getCols() {
+        return this.cols;
+    }
+
+    /**
+     * Gets the partition cols.
+     *
+     * @return the partition cols
+     */
+    public List<HCatFieldSchema> getPartitionCols() {
+        return this.partCols;
+    }
+
+    /**
+     * Gets the bucket cols.
+     *
+     * @return the bucket cols
+     */
+    public List<String> getBucketCols() {
+        return this.bucketCols;
+    }
+
+    public int getNumBuckets() {
+        return this.numBuckets;
+    }
+
+    /**
+     * Gets the comments.
+     *
+     * @return the comments
+     */
+    public String getComments() {
+        return this.comment;
+    }
+
+    /**
+     * Gets the storage handler.
+     *
+     * @return the storage handler
+     */
+    public String getStorageHandler() {
+        return this.storageHandler;
+    }
+
+    /**
+     * Gets the location.
+     *
+     * @return the location
+     */
+    public String getLocation() {
+        return this.location;
+    }
+
+    /**
+     * Gets the external.
+     *
+     * @return the external
+     */
+    public boolean getExternal() {
+        return this.isExternal;
+    }
+
+    /**
+     * Gets the sort cols.
+     *
+     * @return the sort cols
+     */
+    public List<Order> getSortCols() {
+        return this.sortCols;
+    }
+
+    /**
+     * Gets the tbl props.
+     *
+     * @return the tbl props
+     */
+    public Map<String, String> getTblProps() {
+        return this.tblProps;
+    }
+
+    /**
+     * Gets the file format.
+     *
+     * @return the file format
+     */
+    public String getFileFormat() {
+        return this.fileFormat;
+    }
+
+    /**
+     * Gets the database name.
+     *
+     * @return the database name
+     */
+    public String getDatabaseName() {
+        return this.dbName;
+    }
+
+    @Override
+    public String toString() {
+        return "HCatCreateTableDesc ["
+            + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null")
+            + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
+            + "isExternal="
+            + isExternal
+            + ", "
+            + (comment != null ? "comment=" + comment + ", " : "comment=null")
+            + (location != null ? "location=" + location + ", " : "location=null")
+            + (cols != null ? "cols=" + cols + ", " : "cols=null")
+            + (partCols != null ? "partCols=" + partCols + ", " : "partCols=null")
+            + (bucketCols != null ? "bucketCols=" + bucketCols + ", " : "bucketCols=null")
+            + "numBuckets="
+            + numBuckets
+            + ", "
+            + (sortCols != null ? "sortCols=" + sortCols + ", " : "sortCols=null")
+            + (tblProps != null ? "tblProps=" + tblProps + ", " : "tblProps=null")
+            + "ifNotExists="
+            + ifNotExists
+            + ", "
+            + (fileFormat != null ? "fileFormat=" + fileFormat + ", " : "fileFormat=null")
+            + (inputformat != null ? "inputformat=" + inputformat + ", "
+            : "inputformat=null")
+            + (outputformat != null ? "outputformat=" + outputformat + ", "
+            : "outputformat=null")
+            + (serde != null ? "serde=" + serde + ", " : "serde=null")
+            + (storageHandler != null ? "storageHandler=" + storageHandler
+            : "storageHandler=null") + "]";
+    }
+
+    public static class Builder {
+
+        private String tableName;
+        private boolean isExternal;
+        private List<HCatFieldSchema> cols;
+        private List<HCatFieldSchema> partCols;
+        private List<String> bucketCols;
+        private List<Order> sortCols;
+        private int numBuckets;
+        private String comment;
+        private String fileFormat;
+        private String location;
+        private String storageHandler;
+        private Map<String, String> tblProps;
+        private boolean ifNotExists;
+        private String dbName;
 
 
-          private Builder(String dbName, String tableName, List<HCatFieldSchema> columns) {
-              this.dbName = dbName;
-              this.tableName = tableName;
-              this.cols = columns;
-          }
+        private Builder(String dbName, String tableName, List<HCatFieldSchema> columns) {
+            this.dbName = dbName;
+            this.tableName = tableName;
+            this.cols = columns;
+        }
 
 
-          /**
-           * If not exists.
-           *
-           * @param ifNotExists If set to true, hive will not throw exception, if a
-           * table with the same name already exists.
-           * @return the builder
-           */
-          public Builder ifNotExists(boolean ifNotExists) {
+        /**
+         * If not exists.
+         *
+         * @param ifNotExists If set to true, hive will not throw exception, if a
+         * table with the same name already exists.
+         * @return the builder
+         */
+        public Builder ifNotExists(boolean ifNotExists) {
             this.ifNotExists = ifNotExists;
             return this;
-          }
+        }
 
 
-          /**
-           * Partition cols.
-           *
-           * @param partCols the partition cols
-           * @return the builder
-           */
-          public Builder partCols(ArrayList<HCatFieldSchema> partCols) {
-              this.partCols = partCols;
-              return this;
-          }
+        /**
+         * Partition cols.
+         *
+         * @param partCols the partition cols
+         * @return the builder
+         */
+        public Builder partCols(ArrayList<HCatFieldSchema> partCols) {
+            this.partCols = partCols;
+            return this;
+        }
 
 
-          /**
-           * Bucket cols.
-           *
-           * @param bucketCols the bucket cols
-           * @return the builder
-           */
-          public Builder bucketCols(ArrayList<String> bucketCols, int buckets) {
+        /**
+         * Bucket cols.
+         *
+         * @param bucketCols the bucket cols
+         * @return the builder
+         */
+        public Builder bucketCols(ArrayList<String> bucketCols, int buckets) {
             this.bucketCols = bucketCols;
             this.numBuckets = buckets;
             return this;
-          }
+        }
 
-          /**
-           * Storage handler.
-           *
-           * @param storageHandler the storage handler
-           * @return the builder
-           */
-          public Builder storageHandler(String storageHandler) {
+        /**
+         * Storage handler.
+         *
+         * @param storageHandler the storage handler
+         * @return the builder
+         */
+        public Builder storageHandler(String storageHandler) {
             this.storageHandler = storageHandler;
             return this;
-          }
+        }
 
-          /**
-           * Location.
-           *
-           * @param location the location
-           * @return the builder
-           */
-          public Builder location(String location) {
+        /**
+         * Location.
+         *
+         * @param location the location
+         * @return the builder
+         */
+        public Builder location(String location) {
             this.location = location;
             return this;
-          }
+        }
 
-          /**
-           * Comments.
-           *
-           * @param comment the comment
-           * @return the builder
-           */
-          public Builder comments(String comment) {
+        /**
+         * Comments.
+         *
+         * @param comment the comment
+         * @return the builder
+         */
+        public Builder comments(String comment) {
             this.comment = comment;
             return this;
-          }
+        }
 
-          /**
-           * Checks if is table external.
-           *
-           * @param isExternal the is external
-           * @return the builder
-           */
-          public Builder isTableExternal(boolean isExternal) {
+        /**
+         * Checks if is table external.
+         *
+         * @param isExternal the is external
+         * @return the builder
+         */
+        public Builder isTableExternal(boolean isExternal) {
             this.isExternal = isExternal;
             return this;
-          }
+        }
 
-          /**
-           * Sort cols.
-           *
-           * @param sortCols the sort cols
-           * @return the builder
-           */
-          public Builder sortCols(ArrayList<Order> sortCols) {
+        /**
+         * Sort cols.
+         *
+         * @param sortCols the sort cols
+         * @return the builder
+         */
+        public Builder sortCols(ArrayList<Order> sortCols) {
             this.sortCols = sortCols;
             return this;
-          }
+        }
 
-          /**
-           * Tbl props.
-           *
-           * @param tblProps the tbl props
-           * @return the builder
-           */
-          public Builder tblProps(Map<String, String> tblProps) {
+        /**
+         * Tbl props.
+         *
+         * @param tblProps the tbl props
+         * @return the builder
+         */
+        public Builder tblProps(Map<String, String> tblProps) {
             this.tblProps = tblProps;
             return this;
-          }
+        }
 
-          /**
-           * File format.
-           *
-           * @param format the format
-           * @return the builder
-           */
-          public Builder fileFormat(String format){
-              this.fileFormat = format;
-              return this;
-          }
+        /**
+         * File format.
+         *
+         * @param format the format
+         * @return the builder
+         */
+        public Builder fileFormat(String format) {
+            this.fileFormat = format;
+            return this;
+        }
 
-          /**
-           * Builds the HCatCreateTableDesc.
-           *
-           * @return HCatCreateTableDesc
-           * @throws HCatException
-           */
-          public HCatCreateTableDesc build() throws HCatException {
-              if(this.dbName == null){
+        /**
+         * Builds the HCatCreateTableDesc.
+         *
+         * @return HCatCreateTableDesc
+         * @throws HCatException
+         */
+        public HCatCreateTableDesc build() throws HCatException {
+            if (this.dbName == null) {
                 LOG.info("Database name found null. Setting db to :"
-                        + MetaStoreUtils.DEFAULT_DATABASE_NAME);
+                    + MetaStoreUtils.DEFAULT_DATABASE_NAME);
                 this.dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
-              }
-              HCatCreateTableDesc desc = new HCatCreateTableDesc(this.dbName,
-                      this.tableName, this.cols);
-              desc.ifNotExists = this.ifNotExists;
-              desc.isExternal = this.isExternal;
-              desc.comment = this.comment;
-              desc.partCols = this.partCols;
-              desc.bucketCols = this.bucketCols;
-              desc.numBuckets = this.numBuckets;
-              desc.location = this.location;
-              desc.tblProps = this.tblProps;
-              desc.sortCols = this.sortCols;
-              desc.serde = null;
-              if (!StringUtils.isEmpty(fileFormat)) {
-                  desc.fileFormat = fileFormat;
-                  if ("SequenceFile".equalsIgnoreCase(fileFormat)) {
-                      desc.inputformat = SequenceFileInputFormat.class.getName();
-                      desc.outputformat = SequenceFileOutputFormat.class
-                              .getName();
-                  } else if ("RCFile".equalsIgnoreCase(fileFormat)) {
-                      desc.inputformat = RCFileInputFormat.class.getName();
-                      desc.outputformat = RCFileOutputFormat.class.getName();
-                      desc.serde = ColumnarSerDe.class.getName();
-                  }
-                  desc.storageHandler = StringUtils.EMPTY;
-              } else if (!StringUtils.isEmpty(storageHandler)) {
-                  desc.storageHandler = storageHandler;
-              } else {
-                  desc.fileFormat = "TextFile";
-                  LOG.info("Using text file format for the table.");
-                  desc.inputformat = TextInputFormat.class.getName();
-                  LOG.info("Table input format:" + desc.inputformat);
-                  desc.outputformat = IgnoreKeyTextOutputFormat.class
-                          .getName();
-                  LOG.info("Table output format:" + desc.outputformat);
-              }
-              return desc;
-          }
-      }
+            }
+            HCatCreateTableDesc desc = new HCatCreateTableDesc(this.dbName,
+                this.tableName, this.cols);
+            desc.ifNotExists = this.ifNotExists;
+            desc.isExternal = this.isExternal;
+            desc.comment = this.comment;
+            desc.partCols = this.partCols;
+            desc.bucketCols = this.bucketCols;
+            desc.numBuckets = this.numBuckets;
+            desc.location = this.location;
+            desc.tblProps = this.tblProps;
+            desc.sortCols = this.sortCols;
+            desc.serde = null;
+            if (!StringUtils.isEmpty(fileFormat)) {
+                desc.fileFormat = fileFormat;
+                if ("SequenceFile".equalsIgnoreCase(fileFormat)) {
+                    desc.inputformat = SequenceFileInputFormat.class.getName();
+                    desc.outputformat = SequenceFileOutputFormat.class
+                        .getName();
+                } else if ("RCFile".equalsIgnoreCase(fileFormat)) {
+                    desc.inputformat = RCFileInputFormat.class.getName();
+                    desc.outputformat = RCFileOutputFormat.class.getName();
+                    desc.serde = ColumnarSerDe.class.getName();
+                }
+                desc.storageHandler = StringUtils.EMPTY;
+            } else if (!StringUtils.isEmpty(storageHandler)) {
+                desc.storageHandler = storageHandler;
+            } else {
+                desc.fileFormat = "TextFile";
+                LOG.info("Using text file format for the table.");
+                desc.inputformat = TextInputFormat.class.getName();
+                LOG.info("Table input format:" + desc.inputformat);
+                desc.outputformat = IgnoreKeyTextOutputFormat.class
+                    .getName();
+                LOG.info("Table output format:" + desc.outputformat);
+            }
+            return desc;
+        }
+    }
 }
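
To round out the table builder above, here is a minimal creation sketch. The database, table, columns, and partition column are illustrative, the HCatClient parameter is assumed to be initialized, and the "RCFile" string selects the RCFileInputFormat/RCFileOutputFormat/ColumnarSerDe branch of build() shown above; createTable is the call used in TestHCatClient below.

import java.util.ArrayList;

import org.apache.hcatalog.api.HCatClient;
import org.apache.hcatalog.api.HCatCreateTableDesc;
import org.apache.hcatalog.data.schema.HCatFieldSchema;

public class CreateTableSketch {
    static void createTable(HCatClient client) throws Exception {
        // Column and partition schemas; names and comments are examples only.
        ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
        cols.add(new HCatFieldSchema("id", HCatFieldSchema.Type.INT, "id comment"));
        cols.add(new HCatFieldSchema("value", HCatFieldSchema.Type.STRING, "value comment"));

        ArrayList<HCatFieldSchema> partCols = new ArrayList<HCatFieldSchema>();
        partCols.add(new HCatFieldSchema("dt", HCatFieldSchema.Type.STRING, "partition date"));

        HCatCreateTableDesc tableDesc = HCatCreateTableDesc
            .create("reports", "web_logs", cols)
            .partCols(partCols)
            .fileFormat("RCFile")
            .ifNotExists(true)
            .build();
        client.createTable(tableDesc);
    }
}
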
diff --git a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatDatabase.java b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatDatabase.java
index 146ce72..9cb9ca9 100644
--- a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatDatabase.java
+++ b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatDatabase.java
@@ -44,7 +44,7 @@
      *
      * @return the database name
      */
-    public String getName(){
+    public String getName() {
         return dbName;
     }
 
@@ -53,7 +53,7 @@
      *
      * @return the dB location
      */
-    public String getLocation(){
+    public String getLocation() {
         return dbLocation;
     }
 
@@ -62,7 +62,7 @@
      *
      * @return the comment
      */
-    public String getComment(){
+    public String getComment() {
         return comment;
     }
 
@@ -71,17 +71,17 @@
      *
      * @return the dB properties
      */
-    public Map<String, String> getProperties(){
+    public Map<String, String> getProperties() {
         return props;
     }
 
     @Override
     public String toString() {
         return "HCatDatabase ["
-                + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
-                + (dbLocation != null ? "dbLocation=" + dbLocation + ", " : "dbLocation=null")
-                + (comment != null ? "comment=" + comment + ", " : "comment=null")
-                + (props != null ? "props=" + props : "props=null") + "]";
+            + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
+            + (dbLocation != null ? "dbLocation=" + dbLocation + ", " : "dbLocation=null")
+            + (comment != null ? "comment=" + comment + ", " : "comment=null")
+            + (props != null ? "props=" + props : "props=null") + "]";
     }
 
 }
diff --git a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatPartition.java b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatPartition.java
index 38971e1..7ee4da0 100644
--- a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatPartition.java
+++ b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatPartition.java
@@ -41,7 +41,7 @@
     private int createTime;
     private int lastAccessTime;
     private StorageDescriptor sd;
-    private Map<String,String> parameters;
+    private Map<String, String> parameters;
 
     HCatPartition(Partition partition) throws HCatException {
         this.tableName = partition.getTableName();
@@ -62,7 +62,7 @@
      *
      * @return the table name
      */
-    public String getTableName(){
+    public String getTableName() {
         return this.tableName;
     }
 
@@ -71,7 +71,7 @@
      *
      * @return the database name
      */
-    public String getDatabaseName(){
+    public String getDatabaseName() {
         return this.dbName;
     }
 
@@ -80,8 +80,8 @@
      *
      * @return the columns
      */
-    public List<HCatFieldSchema> getColumns(){
-       return this.tableCols;
+    public List<HCatFieldSchema> getColumns() {
+        return this.tableCols;
     }
 
     /**
@@ -89,7 +89,7 @@
      *
      * @return the input format
      */
-    public String getInputFormat(){
+    public String getInputFormat() {
         return this.sd.getInputFormat();
     }
 
@@ -98,7 +98,7 @@
      *
      * @return the output format
      */
-    public String getOutputFormat(){
+    public String getOutputFormat() {
         return this.sd.getOutputFormat();
     }
 
@@ -109,8 +109,8 @@
      */
     public String getStorageHandler() {
         return this.sd
-                .getParameters()
-                .get(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE);
+            .getParameters()
+            .get(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE);
     }
 
     /**
@@ -118,7 +118,7 @@
      *
      * @return the location
      */
-    public String getLocation(){
+    public String getLocation() {
         return this.sd.getLocation();
     }
 
@@ -127,7 +127,7 @@
      *
      * @return the serde
      */
-    public String getSerDe(){
+    public String getSerDe() {
         return this.sd.getSerdeInfo().getSerializationLib();
     }
 
@@ -140,7 +140,7 @@
      *
      * @return the last access time
      */
-    public int getLastAccessTime(){
+    public int getLastAccessTime() {
         return this.lastAccessTime;
     }
 
@@ -158,7 +158,7 @@
      *
      * @return the values
      */
-    public List<String> getValues(){
+    public List<String> getValues() {
         return this.values;
     }
 
@@ -167,7 +167,7 @@
      *
      * @return the bucket columns
      */
-    public List<String> getBucketCols(){
+    public List<String> getBucketCols() {
         return this.sd.getBucketCols();
     }
 
@@ -176,7 +176,7 @@
      *
      * @return the number of buckets
      */
-    public int getNumBuckets(){
+    public int getNumBuckets() {
         return this.sd.getNumBuckets();
     }
 
@@ -185,19 +185,19 @@
      *
      * @return the sort columns
      */
-    public List<Order> getSortCols(){
+    public List<Order> getSortCols() {
         return this.sd.getSortCols();
     }
 
     @Override
     public String toString() {
         return "HCatPartition ["
-                + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null")
-                + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
-                + (values != null ? "values=" + values + ", " : "values=null")
-                + "createTime=" + createTime + ", lastAccessTime="
-                + lastAccessTime + ", " + (sd != null ? "sd=" + sd + ", " : "sd=null")
-                + (parameters != null ? "parameters=" + parameters : "parameters=null") + "]";
+            + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null")
+            + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
+            + (values != null ? "values=" + values + ", " : "values=null")
+            + "createTime=" + createTime + ", lastAccessTime="
+            + lastAccessTime + ", " + (sd != null ? "sd=" + sd + ", " : "sd=null")
+            + (parameters != null ? "parameters=" + parameters : "parameters=null") + "]";
     }
 
 }
diff --git a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatTable.java b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatTable.java
index 36e93a6..fb8b4ae 100644
--- a/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatTable.java
+++ b/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatTable.java
@@ -66,9 +66,9 @@
         inputFileFormat = hiveTable.getSd().getInputFormat();
         outputFileFormat = hiveTable.getSd().getOutputFormat();
         storageHandler = hiveTable
-                .getSd()
-                .getParameters()
-                .get(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE);
+            .getSd()
+            .getParameters()
+            .get(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE);
         tblProps = hiveTable.getParameters();
         serde = hiveTable.getSd().getSerdeInfo().getSerializationLib();
         location = hiveTable.getSd().getLocation();
@@ -187,7 +187,7 @@
      *
      * @return the serde lib
      */
-    public String getSerdeLib(){
+    public String getSerdeLib() {
         return serde;
     }
 
@@ -196,31 +196,31 @@
      *
      * @return the location
      */
-    public String getLocation(){
+    public String getLocation() {
         return location;
     }
 
     @Override
     public String toString() {
         return "HCatTable ["
-                + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null")
-                + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
-                + (tabletype != null ? "tabletype=" + tabletype + ", " : "tabletype=null")
-                + (cols != null ? "cols=" + cols + ", " : "cols=null")
-                + (partCols != null ? "partCols=" + partCols + ", " : "partCols==null")
-                + (bucketCols != null ? "bucketCols=" + bucketCols + ", " : "bucketCols=null")
-                + (sortCols != null ? "sortCols=" + sortCols + ", " : "sortCols=null")
-                + "numBuckets="
-                + numBuckets
-                + ", "
-                + (inputFileFormat != null ? "inputFileFormat="
-                        + inputFileFormat + ", " : "inputFileFormat=null")
-                + (outputFileFormat != null ? "outputFileFormat="
-                        + outputFileFormat + ", " : "outputFileFormat=null")
-                + (storageHandler != null ? "storageHandler=" + storageHandler
-                        + ", " : "storageHandler=null")
-                + (tblProps != null ? "tblProps=" + tblProps + ", " : "tblProps=null")
-                + (serde != null ? "serde=" + serde + ", " : "serde=")
-                + (location != null ? "location=" + location : "location=") + "]";
+            + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null")
+            + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
+            + (tabletype != null ? "tabletype=" + tabletype + ", " : "tabletype=null")
+            + (cols != null ? "cols=" + cols + ", " : "cols=null")
+            + (partCols != null ? "partCols=" + partCols + ", " : "partCols=null")
+            + (bucketCols != null ? "bucketCols=" + bucketCols + ", " : "bucketCols=null")
+            + (sortCols != null ? "sortCols=" + sortCols + ", " : "sortCols=null")
+            + "numBuckets="
+            + numBuckets
+            + ", "
+            + (inputFileFormat != null ? "inputFileFormat="
+            + inputFileFormat + ", " : "inputFileFormat=null")
+            + (outputFileFormat != null ? "outputFileFormat="
+            + outputFileFormat + ", " : "outputFileFormat=null")
+            + (storageHandler != null ? "storageHandler=" + storageHandler
+            + ", " : "storageHandler=null")
+            + (tblProps != null ? "tblProps=" + tblProps + ", " : "tblProps=null")
+            + (serde != null ? "serde=" + serde + ", " : "serde=")
+            + (location != null ? "location=" + location : "location=") + "]";
     }
 }
diff --git a/webhcat/java-client/src/test/java/org/apache/hcatalog/api/TestHCatClient.java b/webhcat/java-client/src/test/java/org/apache/hcatalog/api/TestHCatClient.java
index 1d846dd..2edb116 100644
--- a/webhcat/java-client/src/test/java/org/apache/hcatalog/api/TestHCatClient.java
+++ b/webhcat/java-client/src/test/java/org/apache/hcatalog/api/TestHCatClient.java
@@ -49,8 +49,8 @@
 
 public class TestHCatClient {
     private static final Logger LOG = LoggerFactory.getLogger(TestHCatClient.class);
-    private static final String msPort  = "20101";
-    private static HiveConf  hcatConf;
+    private static final String msPort = "20101";
+    private static HiveConf hcatConf;
     private static SecurityManager securityManager;
 
     private static class RunMS implements Runnable {
@@ -58,7 +58,7 @@
         @Override
         public void run() {
             try {
-                HiveMetaStore.main(new String[] { "-v", "-p", msPort });
+                HiveMetaStore.main(new String[]{"-v", "-p", msPort});
             } catch (Throwable t) {
                 LOG.error("Exiting. Got exception from metastore: ", t);
             }
@@ -83,14 +83,14 @@
         hcatConf = new HiveConf(TestHCatClient.class);
         hcatConf.set("hive.metastore.local", "false");
         hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
-                + msPort);
+            + msPort);
         hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTRETRIES, 3);
         hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-                HCatSemanticAnalyzer.class.getName());
+            HCatSemanticAnalyzer.class.getName());
         hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
         hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
         hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname,
-                "false");
+            "false");
         System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
         System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
     }
@@ -104,7 +104,7 @@
         client.dropDatabase(db, true, HCatClient.DROP_DB_MODE.CASCADE);
 
         HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create(db).ifNotExists(false)
-                .build();
+            .build();
         client.createDatabase(dbDesc);
         List<String> dbNames = client.listDatabaseNamesByPattern("*");
         assertTrue(dbNames.contains("default"));
@@ -114,22 +114,22 @@
         assertTrue(testDb.getComment() == null);
         assertTrue(testDb.getProperties().size() == 0);
         String warehouseDir = System
-                .getProperty(ConfVars.METASTOREWAREHOUSE.varname, "/user/hive/warehouse");
+            .getProperty(ConfVars.METASTOREWAREHOUSE.varname, "/user/hive/warehouse");
         assertTrue(testDb.getLocation().equals(
-                "file:" + warehouseDir + "/" + db + ".db"));
+            "file:" + warehouseDir + "/" + db + ".db"));
         ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
         cols.add(new HCatFieldSchema("id", Type.INT, "id comment"));
         cols.add(new HCatFieldSchema("value", Type.STRING, "value comment"));
         HCatCreateTableDesc tableDesc = HCatCreateTableDesc
-                .create(db, tableOne, cols).fileFormat("rcfile").build();
+            .create(db, tableOne, cols).fileFormat("rcfile").build();
         client.createTable(tableDesc);
         HCatTable table1 = client.getTable(db, tableOne);
         assertTrue(table1.getInputFileFormat().equalsIgnoreCase(
-                RCFileInputFormat.class.getName()));
+            RCFileInputFormat.class.getName()));
         assertTrue(table1.getOutputFileFormat().equalsIgnoreCase(
-                RCFileOutputFormat.class.getName()));
+            RCFileOutputFormat.class.getName()));
         assertTrue(table1.getSerdeLib().equalsIgnoreCase(
-                ColumnarSerDe.class.getName()));
+            ColumnarSerDe.class.getName()));
         assertTrue(table1.getCols().equals(cols));
         // Since "ifexists" was not set to true, trying to create the same table
         // again
@@ -138,20 +138,20 @@
             client.createTable(tableDesc);
         } catch (HCatException e) {
             assertTrue(e.getMessage().contains(
-                    "AlreadyExistsException while creating table."));
+                "AlreadyExistsException while creating table."));
         }
 
         client.dropTable(db, tableOne, true);
         HCatCreateTableDesc tableDesc2 = HCatCreateTableDesc.create(db,
-                tableTwo, cols).build();
+            tableTwo, cols).build();
         client.createTable(tableDesc2);
         HCatTable table2 = client.getTable(db, tableTwo);
         assertTrue(table2.getInputFileFormat().equalsIgnoreCase(
-                TextInputFormat.class.getName()));
+            TextInputFormat.class.getName()));
         assertTrue(table2.getOutputFileFormat().equalsIgnoreCase(
-                IgnoreKeyTextOutputFormat.class.getName()));
+            IgnoreKeyTextOutputFormat.class.getName()));
         assertTrue(table2.getLocation().equalsIgnoreCase(
-                "file:" + warehouseDir + "/" + db + ".db/" + tableTwo));
+            "file:" + warehouseDir + "/" + db + ".db/" + tableTwo));
         client.close();
     }
 
@@ -163,48 +163,48 @@
         client.dropDatabase(dbName, true, HCatClient.DROP_DB_MODE.CASCADE);
 
         HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create(dbName)
-                .ifNotExists(true).build();
+            .ifNotExists(true).build();
         client.createDatabase(dbDesc);
         ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
         cols.add(new HCatFieldSchema("userid", Type.INT, "id columns"));
         cols.add(new HCatFieldSchema("viewtime", Type.BIGINT,
-                "view time columns"));
+            "view time columns"));
         cols.add(new HCatFieldSchema("pageurl", Type.STRING, ""));
         cols.add(new HCatFieldSchema("ip", Type.STRING,
-                "IP Address of the User"));
+            "IP Address of the User"));
 
         ArrayList<HCatFieldSchema> ptnCols = new ArrayList<HCatFieldSchema>();
         ptnCols.add(new HCatFieldSchema("dt", Type.STRING, "date column"));
         ptnCols.add(new HCatFieldSchema("country", Type.STRING,
-                "country column"));
+            "country column"));
         HCatCreateTableDesc tableDesc = HCatCreateTableDesc
-                .create(dbName, tableName, cols).fileFormat("sequencefile")
-                .partCols(ptnCols).build();
+            .create(dbName, tableName, cols).fileFormat("sequencefile")
+            .partCols(ptnCols).build();
         client.createTable(tableDesc);
 
         Map<String, String> firstPtn = new HashMap<String, String>();
         firstPtn.put("dt", "04/30/2012");
         firstPtn.put("country", "usa");
         HCatAddPartitionDesc addPtn = HCatAddPartitionDesc.create(dbName,
-                tableName, null, firstPtn).build();
+            tableName, null, firstPtn).build();
         client.addPartition(addPtn);
 
         Map<String, String> secondPtn = new HashMap<String, String>();
         secondPtn.put("dt", "04/12/2012");
         secondPtn.put("country", "brazil");
         HCatAddPartitionDesc addPtn2 = HCatAddPartitionDesc.create(dbName,
-                tableName, null, secondPtn).build();
+            tableName, null, secondPtn).build();
         client.addPartition(addPtn2);
 
         Map<String, String> thirdPtn = new HashMap<String, String>();
         thirdPtn.put("dt", "04/13/2012");
         thirdPtn.put("country", "argetina");
         HCatAddPartitionDesc addPtn3 = HCatAddPartitionDesc.create(dbName,
-                tableName, null, thirdPtn).build();
+            tableName, null, thirdPtn).build();
         client.addPartition(addPtn3);
 
         List<HCatPartition> ptnList = client.listPartitionsByFilter(dbName,
-                tableName, null);
+            tableName, null);
         assertTrue(ptnList.size() == 3);
 
         HCatPartition ptn = client.getPartition(dbName, tableName, firstPtn);
@@ -212,29 +212,29 @@
 
         client.dropPartition(dbName, tableName, firstPtn, true);
         ptnList = client.listPartitionsByFilter(dbName,
-                tableName, null);
+            tableName, null);
         assertTrue(ptnList.size() == 2);
 
         List<HCatPartition> ptnListTwo = client.listPartitionsByFilter(dbName,
-                tableName, "country = \"argetina\"");
+            tableName, "country = \"argetina\"");
         assertTrue(ptnListTwo.size() == 1);
 
         client.markPartitionForEvent(dbName, tableName, thirdPtn,
-                PartitionEventType.LOAD_DONE);
+            PartitionEventType.LOAD_DONE);
         boolean isMarked = client.isPartitionMarkedForEvent(dbName, tableName,
-                thirdPtn, PartitionEventType.LOAD_DONE);
+            thirdPtn, PartitionEventType.LOAD_DONE);
         assertTrue(isMarked);
         client.close();
     }
 
     @Test
-    public void testDatabaseLocation() throws Exception{
+    public void testDatabaseLocation() throws Exception {
         HCatClient client = HCatClient.create(new Configuration(hcatConf));
         String dbName = "locationDB";
         client.dropDatabase(dbName, true, HCatClient.DROP_DB_MODE.CASCADE);
 
         HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create(dbName)
-                .ifNotExists(true).location("/tmp/"+dbName).build();
+            .ifNotExists(true).location("/tmp/" + dbName).build();
         client.createDatabase(dbDesc);
         HCatDatabase newDB = client.getDatabase(dbName);
         assertTrue(newDB.getLocation().equalsIgnoreCase("file:/tmp/" + dbName));
@@ -253,12 +253,12 @@
         cols.add(new HCatFieldSchema("id", Type.INT, "id columns"));
         cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
         HCatCreateTableDesc tableDesc = HCatCreateTableDesc
-                .create(null, tableName, cols).fileFormat("rcfile").build();
+            .create(null, tableName, cols).fileFormat("rcfile").build();
         client.createTable(tableDesc);
         // create a new table similar to previous one.
         client.createTableLike(null, tableName, cloneTable, true, false, null);
         List<String> tables = client.listTableNamesByPattern(null, "table*");
-        assertTrue(tables.size() ==2);
+        assertTrue(tables.size() == 2);
         client.close();
     }
 
@@ -273,12 +273,12 @@
         cols.add(new HCatFieldSchema("id", Type.INT, "id columns"));
         cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
         HCatCreateTableDesc tableDesc = HCatCreateTableDesc
-                .create(null, tableName, cols).fileFormat("rcfile").build();
+            .create(null, tableName, cols).fileFormat("rcfile").build();
         client.createTable(tableDesc);
-        client.renameTable(null, tableName,newName);
+        client.renameTable(null, tableName, newName);
         try {
             client.getTable(null, tableName);
-        } catch(HCatException exp){
+        } catch (HCatException exp) {
             assertTrue(exp.getMessage().contains("NoSuchObjectException while fetching table"));
         }
         HCatTable newTable = client.getTable(null, newName);
@@ -299,7 +299,7 @@
         cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
         try {
             HCatCreateTableDesc tableDesc = HCatCreateTableDesc
-                    .create(null, tableName, cols).fileFormat("rcfile").build();
+                .create(null, tableName, cols).fileFormat("rcfile").build();
             client.createTable(tableDesc);
         } catch (Exception exp) {
             isExceptionCaught = true;
@@ -309,7 +309,7 @@
             String newName = "goodTable";
             client.dropTable(null, newName, true);
             HCatCreateTableDesc tableDesc2 = HCatCreateTableDesc
-                    .create(null, newName, cols).fileFormat("rcfile").build();
+                .create(null, newName, cols).fileFormat("rcfile").build();
             client.createTable(tableDesc2);
             HCatTable newTable = client.getTable(null, newName);
             assertTrue(newTable != null);
@@ -332,7 +332,7 @@
         cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
         try {
             HCatCreateTableDesc tableDesc = HCatCreateTableDesc
-                    .create(null, tableName, cols).fileFormat("rcfile").build();
+                .create(null, tableName, cols).fileFormat("rcfile").build();
             client.createTable(tableDesc);
             // The DB foo is non-existent.
             client.getTable("foo", tableName);
@@ -342,7 +342,7 @@
             String newName = "goodTable";
             client.dropTable(null, newName, true);
             HCatCreateTableDesc tableDesc2 = HCatCreateTableDesc
-                    .create(null, newName, cols).fileFormat("rcfile").build();
+                .create(null, newName, cols).fileFormat("rcfile").build();
             client.createTable(tableDesc2);
             HCatTable newTable = client.getTable(null, newName);
             assertTrue(newTable != null);
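(Editorial aside, not part of the HCAT-493 patch: the test above exercises the HCatClient Java API end to end. A minimal sketch of the create-table flow it relies on follows; the package paths are assumed from the HCatalog tree, and the database/table names are placeholders.)

// Illustrative sketch only: mirrors the HCatClient calls visible in TestHCatClient above.
import java.util.ArrayList;

import org.apache.hadoop.conf.Configuration;
import org.apache.hcatalog.api.HCatClient;
import org.apache.hcatalog.api.HCatCreateTableDesc;
import org.apache.hcatalog.data.schema.HCatFieldSchema;
import org.apache.hcatalog.data.schema.HCatFieldSchema.Type;

public class HCatClientSketch {
    public static void main(String[] args) throws Exception {
        // The Configuration would normally carry hive.metastore.uris, as in setUp() above.
        HCatClient client = HCatClient.create(new Configuration());
        ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
        cols.add(new HCatFieldSchema("id", Type.INT, "id comment"));
        cols.add(new HCatFieldSchema("value", Type.STRING, "value comment"));
        HCatCreateTableDesc tableDesc = HCatCreateTableDesc
            .create("mydb", "mytable", cols).fileFormat("rcfile").build();   // placeholder names
        client.createTable(tableDesc);
        client.close();
    }
}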
diff --git a/webhcat/svr/src/main/java/org/apache/hadoop/mapred/TempletonJobTracker.java b/webhcat/svr/src/main/java/org/apache/hadoop/mapred/TempletonJobTracker.java
index b9a4649..84f7945 100644
--- a/webhcat/svr/src/main/java/org/apache/hadoop/mapred/TempletonJobTracker.java
+++ b/webhcat/svr/src/main/java/org/apache/hadoop/mapred/TempletonJobTracker.java
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -36,16 +37,15 @@
     public TempletonJobTracker(UserGroupInformation ugi,
                                InetSocketAddress addr,
                                Configuration conf)
-        throws IOException
-    {
+        throws IOException {
         cnx = (JobSubmissionProtocol)
             RPC.getProxy(JobSubmissionProtocol.class,
-                         JobSubmissionProtocol.versionID,
-                         addr,
-                         ugi,
-                         conf,
-                         NetUtils.getSocketFactory(conf,
-                                                   JobSubmissionProtocol.class));
+                JobSubmissionProtocol.versionID,
+                addr,
+                ugi,
+                conf,
+                NetUtils.getSocketFactory(conf,
+                    JobSubmissionProtocol.class));
     }
 
     /**
@@ -54,8 +54,7 @@
      * @return Profile of the job, or null if not found.
      */
     public JobProfile getJobProfile(JobID jobid)
-        throws IOException
-    {
+        throws IOException {
         return cnx.getJobProfile(jobid);
     }
 
@@ -65,8 +64,7 @@
      * @return Status of the job, or null if not found.
      */
     public JobStatus getJobStatus(JobID jobid)
-        throws IOException
-    {
+        throws IOException {
         return cnx.getJobStatus(jobid);
     }
 
@@ -75,8 +73,7 @@
      * Kill a job.
      */
     public void killJob(JobID jobid)
-        throws IOException
-    {
+        throws IOException {
         cnx.killJob(jobid);
     }
 
@@ -84,8 +81,7 @@
      * Get all the jobs submitted.
      */
     public JobStatus[] getAllJobs()
-        throws IOException
-    {
+        throws IOException {
         return cnx.getAllJobs();
     }
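(Editorial aside, not part of the HCAT-493 patch: TempletonJobTracker is a thin wrapper over an RPC proxy for JobSubmissionProtocol. A usage sketch mirroring how ListDelegator constructs it later in this patch is below; it assumes a reachable JobTracker and uses a placeholder job id.)

// Illustrative sketch only: TempletonJobTracker as used by ListDelegator.run() below.
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.JobTracker;
import org.apache.hadoop.mapred.TempletonJobTracker;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hcatalog.templeton.AppConfig;
import org.apache.hcatalog.templeton.Main;

public class TrackerSketch {
    public static void main(String[] args) throws Exception {
        AppConfig appConf = Main.getAppConfigInstance();          // as used in LauncherDelegator below
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("hcat");
        TempletonJobTracker tracker = new TempletonJobTracker(ugi,
            JobTracker.getAddress(appConf),
            appConf);
        // "job_201209050000_0001" is a placeholder job id.
        JobStatus status = tracker.getJobStatus(JobID.forName("job_201209050000_0001"));
        System.out.println("run state: " + status.getRunState());
    }
}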
 
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/AppConfig.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/AppConfig.java
index 71b791b..28e96e3 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/AppConfig.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/AppConfig.java
@@ -192,11 +192,14 @@
 
     public long zkCleanupInterval()  {
         return getLong(ZooKeeperCleanup.ZK_CLEANUP_INTERVAL,
-                       (1000L * 60L * 60L * 12L)); }
-    public long zkMaxAge()           {
+            (1000L * 60L * 60L * 12L));
+    }
+
+    public long zkMaxAge() {
         return getLong(ZooKeeperCleanup.ZK_CLEANUP_MAX_AGE,
-                       (1000L * 60L * 60L * 24L * 7L)); }
+            (1000L * 60L * 60L * 24L * 7L));
+    }
+
     public String zkHosts()          { return get(ZooKeeperStorage.ZK_HOSTS); }
-    public int zkSessionTimeout()    { return getInt(ZooKeeperStorage.ZK_SESSION_TIMEOUT,
-                                                     30000); }
+    public int zkSessionTimeout()    { return getInt(ZooKeeperStorage.ZK_SESSION_TIMEOUT, 30000); }
 }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/CatchallExceptionMapper.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/CatchallExceptionMapper.java
index 411ff55..807ecbb 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/CatchallExceptionMapper.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/CatchallExceptionMapper.java
@@ -20,6 +20,7 @@
 import javax.ws.rs.core.Response;
 import javax.ws.rs.ext.ExceptionMapper;
 import javax.ws.rs.ext.Provider;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -29,8 +30,7 @@
  */
 @Provider
 public class CatchallExceptionMapper
-    implements ExceptionMapper<Exception>
-{
+    implements ExceptionMapper<Exception> {
     private static final Log LOG = LogFactory.getLog(CatchallExceptionMapper.class);
 
     public Response toResponse(Exception e) {
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/CompleteDelegator.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/CompleteDelegator.java
index 0aa73da..ddb8677 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/CompleteDelegator.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/CompleteDelegator.java
@@ -21,6 +21,7 @@
 import java.net.URL;
 import java.net.MalformedURLException;
 import java.util.Date;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hcatalog.templeton.tool.JobState;
@@ -50,8 +51,7 @@
     }
 
     public CompleteBean run(String id)
-        throws CallbackFailedException, IOException
-    {
+        throws CallbackFailedException, IOException {
         if (id == null)
             acceptWithError("No jobid given");
 
@@ -64,7 +64,7 @@
             Long notified = state.getNotifiedTime();
             if (notified != null)
                 return acceptWithError("Callback already run on "
-                                       + new Date(notified.longValue()));
+                    + new Date(notified.longValue()));
 
             String callback = state.getCallback();
             if (callback == null)
@@ -90,16 +90,14 @@
      * replaced with the completed jobid.
      */
     public static void doCallback(String jobid, String url)
-        throws MalformedURLException, IOException
-    {
+        throws MalformedURLException, IOException {
         if (url.contains("$jobId"))
             url = url.replace("$jobId", jobid);
         TempletonUtils.fetchUrl(new URL(url));
     }
 
     private void failed(String msg, Exception e)
-        throws CallbackFailedException
-    {
+        throws CallbackFailedException {
         if (e != null)
             LOG.error(msg, e);
         else
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/ExecServiceImpl.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/ExecServiceImpl.java
index 6cf48db..41920b5 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/ExecServiceImpl.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/ExecServiceImpl.java
@@ -24,6 +24,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Semaphore;
+
 import org.apache.commons.exec.CommandLine;
 import org.apache.commons.exec.DefaultExecutor;
 import org.apache.commons.exec.ExecuteException;
@@ -67,12 +68,11 @@
      * @param program   The program to run
      * @param args      Arguments to pass to the program
      * @param env       Any extra environment variables to set
-     * @return          The result of the run.
+     * @return The result of the run.
      */
     public ExecBean run(String program, List<String> args,
                         Map<String, String> env)
-        throws NotAuthorizedException, BusyException, ExecuteException, IOException
-    {
+        throws NotAuthorizedException, BusyException, ExecuteException, IOException {
         boolean aquired = false;
         try {
             aquired = avail.tryAcquire();
@@ -95,12 +95,11 @@
      * @param program   The program to run.
      * @param args      Arguments to pass to the program
      * @param env       Any extra environment variables to set
-     * @return          The result of the run.
+     * @return The result of the run.
      */
     public ExecBean runUnlimited(String program, List<String> args,
                                  Map<String, String> env)
-        throws NotAuthorizedException, ExecuteException, IOException
-    {
+        throws NotAuthorizedException, ExecuteException, IOException {
         try {
             return auxRun(program, args, env);
         } catch (IOException e) {
@@ -109,13 +108,12 @@
                 throw e;
             else
                 throw new IOException("Invalid permissions on Templeton directory: "
-                                      + cwd.getCanonicalPath());
+                    + cwd.getCanonicalPath());
         }
     }
 
     private ExecBean auxRun(String program, List<String> args, Map<String, String> env)
-        throws NotAuthorizedException, ExecuteException, IOException
-    {
+        throws NotAuthorizedException, ExecuteException, IOException {
         DefaultExecutor executor = new DefaultExecutor();
         executor.setExitValues(null);
 
@@ -144,8 +142,7 @@
 
     private CommandLine makeCommandLine(String program,
                                         List<String> args)
-        throws NotAuthorizedException, IOException
-    {
+        throws NotAuthorizedException, IOException {
         String path = validateProgram(program);
         CommandLine cmd = new CommandLine(path);
         if (args != null)
@@ -171,8 +168,8 @@
         }
         if (env != null)
             res.putAll(env);
-        for(Map.Entry<String, String> envs : res.entrySet()){
-	    LOG.info("Env " + envs.getKey() + "=" + envs.getValue());
+        for (Map.Entry<String, String> envs : res.entrySet()) {
+            LOG.info("Env " + envs.getKey() + "=" + envs.getValue());
         }
         return res;
     }
@@ -182,11 +179,10 @@
      * an exception if the program is missing or not authorized.
      *
      * @param path      The path of the program.
-     * @return          The path of the validated program.
+     * @return The path of the validated program.
      */
     public String validateProgram(String path)
-        throws NotAuthorizedException, IOException
-    {
+        throws NotAuthorizedException, IOException {
         File f = new File(path);
         if (f.canExecute()) {
             return f.getCanonicalPath();
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/HcatDelegator.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/HcatDelegator.java
index 5fa6da1..94855dd 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/HcatDelegator.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/HcatDelegator.java
@@ -24,6 +24,7 @@
 import java.util.List;
 import java.util.Map;
 import javax.ws.rs.core.Response;
+
 import org.apache.commons.exec.ExecuteException;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
@@ -52,8 +53,7 @@
      */
     public ExecBean run(String user, String exec, boolean format,
                         String group, String permissions)
-        throws NotAuthorizedException, BusyException, ExecuteException, IOException
-    {
+        throws NotAuthorizedException, BusyException, ExecuteException, IOException {
         SecureProxySupport proxy = new SecureProxySupport();
         try {
             List<String> args = makeArgs(exec, format, group, permissions);
@@ -103,8 +103,7 @@
      */
     public Response descDatabase(String user, String db, boolean extended)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = "desc database " + db + "; ";
         if (extended)
             exec = "desc database extended " + db + "; ";
@@ -115,12 +114,12 @@
         } catch (HcatException e) {
             if (e.execBean.stderr.indexOf("Error in semantic analysis") > -1) {
                 return JsonBuilder.create().
-                        put("error", "Database " + db + " does not exist")
-                        .put("errorCode", "404")
-                        .put("database", db).build();
+                    put("error", "Database " + db + " does not exist")
+                    .put("errorCode", "404")
+                    .put("database", db).build();
             }
             throw new HcatException("unable to describe database: " + db,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
@@ -130,8 +129,7 @@
      */
     public Response listDatabases(String user, String dbPattern)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = String.format("show databases like '%s';", dbPattern);
         try {
             String res = jsonRun(user, exec);
@@ -139,7 +137,7 @@
                 .build();
         } catch (HcatException e) {
             throw new HcatException("unable to show databases for: " + dbPattern,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
@@ -148,8 +146,7 @@
      */
     public Response createDatabase(String user, DatabaseDesc desc)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = "create database";
         if (desc.ifNotExists)
             exec += " if not exists";
@@ -160,7 +157,7 @@
             exec += String.format(" location '%s'", desc.location);
         if (TempletonUtils.isset(desc.properties))
             exec += String.format(" with dbproperties (%s)",
-                                  makePropertiesStatement(desc.properties));
+                makePropertiesStatement(desc.properties));
         exec += ";";
 
         String res = jsonRun(user, exec, desc.group, desc.permissions);
@@ -176,8 +173,7 @@
                                  boolean ifExists, String option,
                                  String group, String permissions)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = "drop database";
         if (ifExists)
             exec += " if exists";
@@ -197,8 +193,7 @@
      */
     public Response createTable(String user, String db, TableDesc desc)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = makeCreateTable(db, desc);
 
         try {
@@ -210,7 +205,7 @@
                 .build();
         } catch (final HcatException e) {
             throw new HcatException("unable to create table: " + desc.table,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
@@ -219,8 +214,7 @@
      */
     public Response createTableLike(String user, String db, TableLikeDesc desc)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = String.format("use %s; create", db);
 
         if (desc.external)
@@ -239,7 +233,7 @@
                 .build();
         } catch (final HcatException e) {
             throw new HcatException("unable to create table: " + desc.newTable,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
@@ -248,8 +242,7 @@
      */
     public Response descTable(String user, String db, String table, boolean extended)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = "use " + db + "; ";
         if (extended)
             exec += "desc extended " + table + "; ";
@@ -263,7 +256,7 @@
                 .build();
         } catch (HcatException e) {
             throw new HcatException("unable to describe table: " + table,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
@@ -273,10 +266,9 @@
      */
     public Response listTables(String user, String db, String tablePattern)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = String.format("use %s; show tables like '%s';",
-                                    db, tablePattern);
+            db, tablePattern);
         try {
             String res = jsonRun(user, exec);
             return JsonBuilder.create(res)
@@ -284,7 +276,7 @@
                 .build();
         } catch (HcatException e) {
             throw new HcatException("unable to show tables for: " + tablePattern,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
@@ -294,10 +286,9 @@
      */
     public Response descExtendedTable(String user, String db, String table)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = String.format("use %s; show table extended like %s;",
-                                    db, table);
+            db, table);
         try {
             String res = jsonRun(user, exec);
             JsonBuilder jb = JsonBuilder.create(singleTable(res, table))
@@ -364,7 +355,7 @@
             exec += String.format(" location '%s'", desc.location);
         if (TempletonUtils.isset(desc.tableProperties))
             exec += String.format(" tblproperties (%s)",
-                                  makePropertiesStatement(desc.tableProperties));
+                makePropertiesStatement(desc.tableProperties));
         exec += ";";
 
         return exec;
@@ -411,9 +402,9 @@
     private String makeRowFormat(TableDesc.RowFormatDesc desc) {
         String res =
             makeTermBy(desc.fieldsTerminatedBy, "fields")
-            + makeTermBy(desc.collectionItemsTerminatedBy, "collection items")
-            + makeTermBy(desc.mapKeysTerminatedBy, "map keys")
-            + makeTermBy(desc.linesTerminatedBy, "lines");
+                + makeTermBy(desc.collectionItemsTerminatedBy, "collection items")
+                + makeTermBy(desc.mapKeysTerminatedBy, "map keys")
+                + makeTermBy(desc.linesTerminatedBy, "lines");
 
         if (TempletonUtils.isset(res))
             return "row format delimited" + res;
@@ -427,7 +418,7 @@
     private String makeTermBy(String sep, String fieldName) {
 
         if (TempletonUtils.isset(sep))
-          return String.format(" %s terminated by '%s'", fieldName, sep);
+            return String.format(" %s terminated by '%s'", fieldName, sep);
         else
             return "";
     }
@@ -437,7 +428,7 @@
         String res = "row format serde " + desc.name;
         if (TempletonUtils.isset(desc.properties))
             res += String.format(" with serdeproperties (%s)",
-                                 makePropertiesStatement(desc.properties));
+                makePropertiesStatement(desc.properties));
         return res;
     }
 
@@ -454,14 +445,13 @@
         String res = String.format("stored by '%s'", desc.className);
         if (TempletonUtils.isset(desc.properties))
             res += String.format(" with serdeproperties (%s)",
-                                 makePropertiesStatement(desc.properties));
+                makePropertiesStatement(desc.properties));
         return res;
     }
 
     // Pull out the first table from the "show extended" json.
     private String singleTable(String json, String table)
-        throws IOException
-    {
+        throws IOException {
         Map obj = JsonBuilder.jsonToMap(json);
         if (JsonBuilder.isError(obj))
             return json;
@@ -472,8 +462,8 @@
         else {
             return JsonBuilder
                 .createError(String.format("Table %s does not exist", table),
-                             JsonBuilder.MISSING).
-                buildJson();
+                    JsonBuilder.MISSING).
+                    buildJson();
         }
     }
 
@@ -484,8 +474,7 @@
                               String table, boolean ifExists,
                               String group, String permissions)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = String.format("use %s; drop table", db);
         if (ifExists)
             exec += " if exists";
@@ -509,10 +498,9 @@
                                 String oldTable, String newTable,
                                 String group, String permissions)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = String.format("use %s; alter table %s rename to %s;",
-                                    db, oldTable, newTable);
+            db, oldTable, newTable);
         try {
             String res = jsonRun(user, exec, group, permissions, true);
             return JsonBuilder.create(res)
@@ -521,7 +509,7 @@
                 .build();
         } catch (HcatException e) {
             throw new HcatException("unable to rename table: " + oldTable,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
@@ -531,8 +519,7 @@
     public Response descTableProperty(String user, String db,
                                       String table, String property)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         Response res = descTable(user, db, table, true);
         if (res.getStatus() != JsonBuilder.OK)
             return res;
@@ -558,8 +545,7 @@
      */
     public Response listTableProperties(String user, String db, String table)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         Response res = descTable(user, db, table, true);
         if (res.getStatus() != JsonBuilder.OK)
             return res;
@@ -577,11 +563,10 @@
     public Response addOneTableProperty(String user, String db, String table,
                                         TablePropertyDesc desc)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec
             = String.format("use %s; alter table %s set tblproperties ('%s'='%s');",
-                            db, table, desc.name, desc.value);
+                db, table, desc.name, desc.value);
         try {
             String res = jsonRun(user, exec, desc.group, desc.permissions, true);
             return JsonBuilder.create(res)
@@ -591,12 +576,12 @@
                 .build();
         } catch (HcatException e) {
             throw new HcatException("unable to add table property: " + table,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
     private Map tableProperties(Object extendedTable) {
-        if (! (extendedTable instanceof Map))
+        if (!(extendedTable instanceof Map))
             return null;
         Map m = (Map) extendedTable;
         Map tableInfo = (Map) m.get("tableInfo");
@@ -611,8 +596,7 @@
      */
     public Response listPartitions(String user, String db, String table)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = "use " + db + "; ";
         exec += "show partitions " + table + "; ";
         try {
@@ -623,7 +607,7 @@
                 .build();
         } catch (HcatException e) {
             throw new HcatException("unable to show partitions for table: " + table,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
@@ -633,8 +617,7 @@
     public Response descOnePartition(String user, String db, String table,
                                      String partition)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = "use " + db + "; ";
         exec += "show table extended like " + table
             + " partition (" + partition + "); ";
@@ -648,9 +631,9 @@
                 .build();
         } catch (HcatException e) {
             throw new HcatException("unable to show partition: "
-                                    + table + " " + partition,
-                                    e.execBean,
-                                    exec);
+                + table + " " + partition,
+                e.execBean,
+                exec);
         }
     }
 
@@ -660,8 +643,7 @@
     public Response addOnePartition(String user, String db, String table,
                                     PartitionDesc desc)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = String.format("use %s; alter table %s add", db, table);
         if (desc.ifNotExists)
             exec += " if not exists";
@@ -673,11 +655,11 @@
             String res = jsonRun(user, exec, desc.group, desc.permissions, true);
             if (res.indexOf("AlreadyExistsException") > -1) {
                 return JsonBuilder.create().
-                        put("error", "Partition already exists")
-                        .put("errorCode", "409")
-                        .put("database", db)
-                        .put("table", table)
-                        .put("partition", desc.partition).build();
+                    put("error", "Partition already exists")
+                    .put("errorCode", "409")
+                    .put("database", db)
+                    .put("table", table)
+                    .put("partition", desc.partition).build();
             }
             return JsonBuilder.create(res)
                 .put("database", db)
@@ -686,7 +668,7 @@
                 .build();
         } catch (HcatException e) {
             throw new HcatException("unable to add partition: " + desc,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
@@ -697,8 +679,7 @@
                                   String table, String partition, boolean ifExists,
                                   String group, String permissions)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = String.format("use %s; alter table %s drop", db, table);
         if (ifExists)
             exec += " if exists";
@@ -713,7 +694,7 @@
                 .build();
         } catch (HcatException e) {
             throw new HcatException("unable to drop partition: " + partition,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
@@ -723,13 +704,12 @@
      */
     public Response listColumns(String user, String db, String table)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         try {
             return descTable(user, db, table, false);
         } catch (HcatException e) {
             throw new HcatException("unable to show columns for table: " + table,
-                                    e.execBean, e.statement);
+                e.execBean, e.statement);
         }
     }
 
@@ -738,8 +718,7 @@
      */
     public Response descOneColumn(String user, String db, String table, String column)
         throws SimpleWebException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         Response res = listColumns(user, db, table);
         if (res.getStatus() != JsonBuilder.OK)
             return res;
@@ -748,7 +727,7 @@
         final Map fields = (o != null && (o instanceof Map)) ? (Map) o : null;
         if (fields == null)
             throw new SimpleWebException(500, "Internal error, unable to find column "
-                                         + column);
+                + column);
 
 
         List<Map> cols = (List) fields.get("columns");
@@ -763,9 +742,11 @@
         }
         if (found == null)
             throw new SimpleWebException(500, "unable to find column " + column,
-                                         new HashMap<String, Object>() {{
-                                                 put("description", fields);
-                                             }});
+                new HashMap<String, Object>() {
+                    {
+                        put("description", fields);
+                    }
+                });
         fields.remove("columns");
         fields.put("column", found);
         return Response.fromResponse(res).entity(fields).build();
@@ -777,10 +758,9 @@
     public Response addOneColumn(String user, String db, String table,
                                  ColumnDesc desc)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         String exec = String.format("use %s; alter table %s add columns (%s %s",
-                                    db, table, desc.name, desc.type);
+            db, table, desc.name, desc.type);
         if (TempletonUtils.isset(desc.comment))
             exec += String.format(" comment '%s'", desc.comment);
         exec += ");";
@@ -793,7 +773,7 @@
                 .build();
         } catch (HcatException e) {
             throw new HcatException("unable to add column: " + desc,
-                                    e.execBean, exec);
+                e.execBean, exec);
         }
     }
 
@@ -826,11 +806,10 @@
                            String group, String permissions,
                            boolean requireEmptyOutput)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         ExecBean res = run(user, exec, true, group, permissions);
 
-        if (! isValid(res, requireEmptyOutput))
+        if (!isValid(res, requireEmptyOutput))
             throw new HcatException("Failure calling hcat: " + exec, res, exec);
 
         return res.stdout;
@@ -840,8 +819,7 @@
     // permissions set.
     private String jsonRun(String user, String exec)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         return jsonRun(user, exec, null, null);
     }
 
@@ -849,8 +827,7 @@
     private String jsonRun(String user, String exec,
                            String group, String permissions)
         throws HcatException, NotAuthorizedException, BusyException,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         return jsonRun(user, exec, group, permissions, false);
     }
 }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/HcatException.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/HcatException.java
index ab73adb..c4bf46f 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/HcatException.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/HcatException.java
@@ -27,10 +27,12 @@
     public String statement;
 
     public HcatException(String msg, final ExecBean bean, final String statement) {
-        super(500, msg, new HashMap<String, Object>() {{
-                    put("exec", bean);
-                    put("statement", statement);
-                }});
+        super(500, msg, new HashMap<String, Object>() {
+            {
+                put("exec", bean);
+                put("statement", statement);
+            }
+        });
         execBean = bean;
         this.statement = statement;
     }
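(Editorial aside, not part of the HCAT-493 patch: the reformatted constructor above uses the anonymous-subclass "double brace" idiom to populate the detail map inline. A generic sketch of that idiom follows; the keys shown are the ones used by HcatException, and the values are placeholders.)

// Illustrative sketch only: the "double brace" map-building idiom reformatted above.
// It creates an anonymous HashMap subclass whose instance initializer fills the map.
import java.util.HashMap;
import java.util.Map;

public class DoubleBraceSketch {
    public static void main(String[] args) {
        final String statement = "show tables;";        // placeholder value
        Map<String, Object> detail = new HashMap<String, Object>() {
            {
                put("exec", null);                       // an ExecBean in HcatException above
                put("statement", statement);
            }
        };
        System.out.println(detail);
    }
}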
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/JarDelegator.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/JarDelegator.java
index 889e990..53a8ca6 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/JarDelegator.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/JarDelegator.java
@@ -22,6 +22,7 @@
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.List;
+
 import org.apache.commons.exec.ExecuteException;
 import org.apache.hcatalog.templeton.tool.TempletonUtils;
 
@@ -40,12 +41,11 @@
                            List<String> jarArgs, List<String> defines,
                            String statusdir, String callback, String completedUrl)
         throws NotAuthorizedException, BadParam, BusyException, QueueException,
-        ExecuteException, IOException, InterruptedException
-    {
+        ExecuteException, IOException, InterruptedException {
         runAs = user;
         List<String> args = makeArgs(jar, mainClass,
-                                     libjars, files, jarArgs, defines,
-                                     statusdir, completedUrl);
+            libjars, files, jarArgs, defines,
+            statusdir, completedUrl);
 
         return enqueueController(user, callback, args);
     }
@@ -54,15 +54,14 @@
                                   String libjars, String files,
                                   List<String> jarArgs, List<String> defines,
                                   String statusdir, String completedUrl)
-        throws BadParam, IOException, InterruptedException
-    {
+        throws BadParam, IOException, InterruptedException {
         ArrayList<String> args = new ArrayList<String>();
         try {
             ArrayList<String> allFiles = new ArrayList();
             allFiles.add(TempletonUtils.hadoopFsFilename(jar, appConf, runAs));
 
             args.addAll(makeLauncherArgs(appConf, statusdir,
-                                         completedUrl, allFiles));
+                completedUrl, allFiles));
             args.add("--");
             args.add(appConf.clusterHadoop());
             args.add("jar");
@@ -72,12 +71,12 @@
             if (TempletonUtils.isset(libjars)) {
                 args.add("-libjars");
                 args.add(TempletonUtils.hadoopFsListAsString(libjars, appConf,
-                        runAs));
+                    runAs));
             }
             if (TempletonUtils.isset(files)) {
                 args.add("-files");
                 args.add(TempletonUtils.hadoopFsListAsString(files, appConf,
-                        runAs));
+                    runAs));
             }
 
             for (String d : defines)
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/JsonBuilder.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/JsonBuilder.java
index 5c1fe3f..997356d 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/JsonBuilder.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/JsonBuilder.java
@@ -23,6 +23,7 @@
 import java.util.Map;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
+
 import org.apache.hcatalog.templeton.tool.TempletonUtils;
 import org.codehaus.jackson.map.ObjectMapper;
 
@@ -40,8 +41,7 @@
 
     // Parse the json map.
     private JsonBuilder(String json)
-        throws IOException
-    {
+        throws IOException {
         map = jsonToMap(json);
     }
 
@@ -49,8 +49,7 @@
      * Create a new map object from the existing json.
      */
     public static JsonBuilder create(String json)
-        throws IOException
-    {
+        throws IOException {
         return new JsonBuilder(json);
     }
 
@@ -58,8 +57,7 @@
      * Create a new map object.
      */
     public static JsonBuilder create()
-        throws IOException
-    {
+        throws IOException {
         return new JsonBuilder(null);
     }
 
@@ -67,8 +65,7 @@
      * Create a new map error object.
      */
     public static JsonBuilder createError(String msg, int code)
-        throws IOException
-    {
+        throws IOException {
         return new JsonBuilder(null)
             .put("error", msg)
             .put("errorCode", code);
@@ -109,8 +106,7 @@
      * Turn the map back to json.
      */
     public String buildJson()
-        throws IOException
-    {
+        throws IOException {
         return mapToJson(map);
     }
 
@@ -153,8 +149,7 @@
     /**
      * Check if this is an error doc.
      */
-    public static boolean isError(Map obj)
-    {
+    public static boolean isError(Map obj) {
         return (obj != null) && obj.containsKey("error");
     }
 
@@ -162,9 +157,8 @@
      * Convert a json string to a Map.
      */
     public static Map jsonToMap(String json)
-        throws IOException
-    {
-        if (! TempletonUtils.isset(json))
+        throws IOException {
+        if (!TempletonUtils.isset(json))
             return new HashMap<String, Object>();
         else {
             ObjectMapper mapper = new ObjectMapper();
@@ -176,8 +170,7 @@
      * Convert a map to a json string.
      */
     public static String mapToJson(Object obj)
-        throws IOException
-    {
+        throws IOException {
         ObjectMapper mapper = new ObjectMapper();
         ByteArrayOutputStream out = new ByteArrayOutputStream();
         mapper.writeValue(out, obj);
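(Editorial aside, not part of the HCAT-493 patch: JsonBuilder is the fluent helper the delegators above use to turn hcat output into JSON responses. A minimal sketch of the pattern, based on the calls visible in HcatDelegator, is below; the input string stands in for hcat output, and JsonBuilder's visibility outside the templeton package is assumed.)

// Illustrative sketch only: the fluent JsonBuilder pattern used by HcatDelegator above.
import org.apache.hcatalog.templeton.JsonBuilder;

public class JsonBuilderSketch {
    public static void main(String[] args) throws Exception {
        String res = "{\"tables\":[\"table1\",\"table2\"]}";    // placeholder hcat output
        String json = JsonBuilder.create(res)                   // parse existing JSON into a map
            .put("database", "default")                         // add or override fields
            .buildJson();                                        // serialize the map back to JSON
        System.out.println(json);
    }
}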
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/LauncherDelegator.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/LauncherDelegator.java
index 857856c..93ab238 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/LauncherDelegator.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/LauncherDelegator.java
@@ -21,6 +21,7 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
+
 import org.apache.commons.exec.ExecuteException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -48,8 +49,7 @@
     }
 
     public void registerJob(String id, String user, String callback)
-        throws IOException
-    {
+        throws IOException {
         JobState state = null;
         try {
             state = new JobState(id, Main.getAppConfigInstance());
@@ -65,12 +65,11 @@
      * Enqueue the TempletonControllerJob directly calling doAs.
      */
     public EnqueueBean enqueueController(String user, String callback,
-                                                 List<String> args)
+                                         List<String> args)
         throws NotAuthorizedException, BusyException, ExecuteException,
-        IOException, QueueException
-    {
+        IOException, QueueException {
         try {
-            UserGroupInformation ugi = UgiFactory.getUgi(user); 
+            UserGroupInformation ugi = UgiFactory.getUgi(user);
 
             final long startTime = System.nanoTime();
 
@@ -91,24 +90,22 @@
     }
 
     private String queueAsUser(UserGroupInformation ugi, final List<String> args)
-        throws IOException, InterruptedException
-    {
+        throws IOException, InterruptedException {
         String id = ugi.doAs(new PrivilegedExceptionAction<String>() {
-                public String run() throws Exception {
-                    String[] array = new String[args.size()];
-                    TempletonControllerJob ctrl = new TempletonControllerJob();
-                    ToolRunner.run(ctrl, args.toArray(array));
-                    return ctrl.getSubmittedId();
-                }
-            });
+            public String run() throws Exception {
+                String[] array = new String[args.size()];
+                TempletonControllerJob ctrl = new TempletonControllerJob();
+                ToolRunner.run(ctrl, args.toArray(array));
+                return ctrl.getSubmittedId();
+            }
+        });
 
         return id;
     }
 
     public List<String> makeLauncherArgs(AppConfig appConf, String statusdir,
                                          String completedUrl,
-                                         List<String> copyFiles)
-    {
+                                         List<String> copyFiles) {
         ArrayList<String> args = new ArrayList<String>();
 
         args.add("-libjars");
@@ -122,9 +119,9 @@
         // Internal vars
         addDef(args, TempletonControllerJob.STATUSDIR_NAME, statusdir);
         addDef(args, TempletonControllerJob.COPY_NAME,
-               TempletonUtils.encodeArray(copyFiles));
+            TempletonUtils.encodeArray(copyFiles));
         addDef(args, TempletonControllerJob.OVERRIDE_CLASSPATH,
-               makeOverrideClasspath(appConf));
+            makeOverrideClasspath(appConf));
 
         // Job vars
         addStorageVars(args);
@@ -136,21 +133,21 @@
     // Storage vars
     private void addStorageVars(List<String> args) {
         addDef(args, TempletonStorage.STORAGE_CLASS,
-               appConf.get(TempletonStorage.STORAGE_CLASS));
+            appConf.get(TempletonStorage.STORAGE_CLASS));
         addDef(args, TempletonStorage.STORAGE_ROOT,
-                appConf.get(TempletonStorage.STORAGE_ROOT));
+            appConf.get(TempletonStorage.STORAGE_ROOT));
         addDef(args, ZooKeeperStorage.ZK_HOSTS,
-                appConf.get(ZooKeeperStorage.ZK_HOSTS));
+            appConf.get(ZooKeeperStorage.ZK_HOSTS));
         addDef(args, ZooKeeperStorage.ZK_SESSION_TIMEOUT,
-                appConf.get(ZooKeeperStorage.ZK_SESSION_TIMEOUT));
+            appConf.get(ZooKeeperStorage.ZK_SESSION_TIMEOUT));
     }
 
     // Completion notifier vars
     private void addCompletionVars(List<String> args, String completedUrl) {
         addDef(args, AppConfig.HADOOP_END_RETRY_NAME,
-               appConf.get(AppConfig.CALLBACK_RETRY_NAME));
+            appConf.get(AppConfig.CALLBACK_RETRY_NAME));
         addDef(args, AppConfig.HADOOP_END_INTERVAL_NAME,
-               appConf.get(AppConfig.CALLBACK_INTERVAL_NAME));
+            appConf.get(AppConfig.CALLBACK_INTERVAL_NAME));
         addDef(args, AppConfig.HADOOP_END_URL_NAME, completedUrl);
     }
 
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/ListDelegator.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/ListDelegator.java
index 1d6546d..40266cb 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/ListDelegator.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/ListDelegator.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.List;
 import java.util.ArrayList;
+
 import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.JobTracker;
 import org.apache.hadoop.mapred.TempletonJobTracker;
@@ -35,14 +36,13 @@
     }
 
     public List<String> run(String user)
-        throws NotAuthorizedException, BadParam, IOException
-    {
+        throws NotAuthorizedException, BadParam, IOException {
         UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
         TempletonJobTracker tracker = null;
         try {
             tracker = new TempletonJobTracker(ugi,
-                                              JobTracker.getAddress(appConf),
-                                              appConf);
+                JobTracker.getAddress(appConf),
+                appConf);
 
             ArrayList<String> ids = new ArrayList<String>();
 
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/Main.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/Main.java
index 46eee66..4c85a46 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/Main.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/Main.java
@@ -24,6 +24,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.web.AuthFilter;
@@ -116,31 +117,30 @@
 
     private void checkEnv() {
         checkCurrentDirPermissions();
-        
+
     }
 
     private void checkCurrentDirPermissions() {
         //org.apache.commons.exec.DefaultExecutor requires
         // that current directory exists
         File pwd = new File(".");
-        if(!pwd.exists()){
+        if (!pwd.exists()) {
             String msg = "Server failed to start: templeton: Current working directory '.' does not exist!";
             System.err.println(msg);
-            LOG.fatal( msg);
+            LOG.fatal(msg);
             System.exit(1);
         }
     }
 
     public Server runServer(int port)
-        throws Exception
-    {
+        throws Exception {
 
         //Authenticate using keytab
-        if(UserGroupInformation.isSecurityEnabled()){
+        if (UserGroupInformation.isSecurityEnabled()) {
             UserGroupInformation.loginUserFromKeytab(conf.kerberosPrincipal(),
-                    conf.kerberosKeytab());
+                conf.kerberosKeytab());
         }
-        
+
         // Create the Jetty server
         Server server = new Server(port);
         ServletContextHandler root = new ServletContextHandler(server, "/");
@@ -166,11 +166,11 @@
         FilterHolder authFilter = new FilterHolder(AuthFilter.class);
         if (UserGroupInformation.isSecurityEnabled()) {
             authFilter.setInitParameter("dfs.web.authentication.signature.secret",
-                                        conf.kerberosSecret());
+                conf.kerberosSecret());
             authFilter.setInitParameter("dfs.web.authentication.kerberos.principal",
-                                        conf.kerberosPrincipal());
+                conf.kerberosPrincipal());
             authFilter.setInitParameter("dfs.web.authentication.kerberos.keytab",
-                                        conf.kerberosKeytab());
+                conf.kerberosKeytab());
         }
         return authFilter;
     }
@@ -181,7 +181,7 @@
         HashMap<String, Object> props = new HashMap<String, Object>();
         props.put("com.sun.jersey.api.json.POJOMappingFeature", "true");
         props.put("com.sun.jersey.config.property.WadlGeneratorConfig",
-                  "org.apache.hcatalog.templeton.WadlConfig");
+            "org.apache.hcatalog.templeton.WadlConfig");
         rc.setPropertiesAndFeatures(props);
 
         return rc;
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/PigDelegator.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/PigDelegator.java
index 17d4481..0e6bae4 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/PigDelegator.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/PigDelegator.java
@@ -23,6 +23,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+
 import org.apache.commons.exec.ExecuteException;
 import org.apache.hcatalog.templeton.tool.TempletonUtils;
 
@@ -41,12 +42,11 @@
                            List<String> pigArgs, String otherFiles,
                            String statusdir, String callback, String completedUrl)
         throws NotAuthorizedException, BadParam, BusyException, QueueException,
-        ExecuteException, IOException, InterruptedException
-    {
+        ExecuteException, IOException, InterruptedException {
         runAs = user;
         List<String> args = makeArgs(execute,
-                                     srcFile, pigArgs,
-                                     otherFiles, statusdir, completedUrl);
+            srcFile, pigArgs,
+            otherFiles, statusdir, completedUrl);
 
         return enqueueController(user, callback, args);
     }
@@ -54,17 +54,15 @@
     private List<String> makeArgs(String execute, String srcFile,
                                   List<String> pigArgs, String otherFiles,
                                   String statusdir, String completedUrl)
-        throws BadParam, IOException, InterruptedException
-    {
+        throws BadParam, IOException, InterruptedException {
         ArrayList<String> args = new ArrayList<String>();
         try {
             ArrayList<String> allFiles = new ArrayList<String>();
             if (TempletonUtils.isset(srcFile))
                 allFiles.add(TempletonUtils.hadoopFsFilename
-                        (srcFile, appConf, runAs));
+                    (srcFile, appConf, runAs));
             if (TempletonUtils.isset(otherFiles)) {
-                String[] ofs = TempletonUtils.hadoopFsListAsArray
-                        (otherFiles, appConf, runAs);
+                String[] ofs = TempletonUtils.hadoopFsListAsArray(otherFiles, appConf, runAs);
                 allFiles.addAll(Arrays.asList(ofs));
             }
 
@@ -81,7 +79,7 @@
             } else if (TempletonUtils.isset(srcFile)) {
                 args.add("-file");
                 args.add(TempletonUtils.hadoopFsPath(srcFile, appConf, runAs)
-                        .getName());
+                    .getName());
             }
         } catch (FileNotFoundException e) {
             throw new BadParam(e.getMessage());
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/QueueStatusBean.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/QueueStatusBean.java
index e1e3f96..abe579a 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/QueueStatusBean.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/QueueStatusBean.java
@@ -18,6 +18,7 @@
 package org.apache.hcatalog.templeton;
 
 import java.io.IOException;
+
 import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.JobProfile;
 import org.apache.hcatalog.templeton.tool.JobState;
@@ -29,15 +30,16 @@
     public JobStatus status;
     public JobProfile profile;
 
-    public String  id;
-    public String  parentId;
-    public String  percentComplete;
-    public Long    exitValue;
-    public String  user;
-    public String  callback;
-    public String  completed;
+    public String id;
+    public String parentId;
+    public String percentComplete;
+    public Long exitValue;
+    public String user;
+    public String callback;
+    public String completed;
 
-    public QueueStatusBean() {}
+    public QueueStatusBean() {
+    }
 
     /**
      * Create a new QueueStatusBean
@@ -47,19 +49,18 @@
      * @param profile    job profile
      */
     public QueueStatusBean(JobState state, JobStatus status, JobProfile profile)
-        throws IOException
-    {
+        throws IOException {
         this.status = status;
         this.profile = profile;
 
-        id              = profile.getJobID().toString();
-        parentId        = state.getId();
+        id = profile.getJobID().toString();
+        parentId = state.getId();
         if (id.equals(parentId))
             parentId = null;
         percentComplete = state.getPercentComplete();
-        exitValue       = state.getExitValue();
-        user            = state.getUser();
-        callback        = state.getCallback();
-        completed       = state.getCompleteStatus();
+        exitValue = state.getExitValue();
+        user = state.getUser();
+        callback = state.getCallback();
+        completed = state.getCompleteStatus();
     }
 }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SecureProxySupport.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SecureProxySupport.java
index 98c85b3..77ce4d4 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SecureProxySupport.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SecureProxySupport.java
@@ -52,23 +52,26 @@
     }
 
     private static final Log LOG = LogFactory.getLog(SecureProxySupport.class);
-    
+
     /**
      * The file where we store the auth token
      */
-    public Path getTokenPath() { return( tokenPath ); }
+    public Path getTokenPath() {
+        return (tokenPath);
+    }
 
     /**
      * The token to pass to hcat.
      */
-    public String getHcatServiceStr() { return( HCAT_SERVICE ); }
+    public String getHcatServiceStr() {
+        return (HCAT_SERVICE);
+    }
 
     /**
      * Create the delegation token.
      */
     public Path open(String user, Configuration conf)
-        throws IOException, InterruptedException
-    {
+        throws IOException, InterruptedException {
         close();
         if (isEnabled) {
             this.user = user;
@@ -85,7 +88,7 @@
             msToken.decodeFromUrlString(hcatTokenStr);
             msToken.setService(new Text(HCAT_SERVICE));
             writeProxyDelegationTokens(fsToken, msToken, conf, user, tokenPath);
-            
+
         }
         return tokenPath;
     }
@@ -106,7 +109,7 @@
     public void addEnv(Map<String, String> env) {
         if (isEnabled) {
             env.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION,
-                    getTokenPath().toUri().getPath());
+                getTokenPath().toUri().getPath());
         }
     }
 
@@ -118,73 +121,70 @@
             args.add("-D");
             args.add("hive.metastore.token.signature=" + getHcatServiceStr());
             args.add("-D");
-            args.add("proxy.user.name=" + user);            
+            args.add("proxy.user.name=" + user);
         }
     }
-    
-    class TokenWrapper { 
+
+    class TokenWrapper {
         Token<?> token;
     }
 
     private Token<?> getFSDelegationToken(String user,
-                                           final Configuration conf)
-        throws IOException, InterruptedException
-    {
+                                          final Configuration conf)
+        throws IOException, InterruptedException {
         LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
         final UserGroupInformation ugi = UgiFactory.getUgi(user);
 
-       final TokenWrapper twrapper = new TokenWrapper();
-       ugi.doAs(new PrivilegedExceptionAction<Object>() {
-           public Object run() throws IOException {
-               FileSystem fs = FileSystem.get(conf);
-               twrapper.token =  fs.getDelegationToken(ugi.getShortUserName());
-               return null;
-           }
-       });
-       return twrapper.token;
-       
+        final TokenWrapper twrapper = new TokenWrapper();
+        ugi.doAs(new PrivilegedExceptionAction<Object>() {
+            public Object run() throws IOException {
+                FileSystem fs = FileSystem.get(conf);
+                twrapper.token = fs.getDelegationToken(ugi.getShortUserName());
+                return null;
+            }
+        });
+        return twrapper.token;
+
     }
 
     private void writeProxyDelegationTokens(final Token<?> fsToken,
-            final Token<?> msToken,
-            final Configuration conf,
-            String user,
-            final Path tokenPath)
-                    throws IOException, InterruptedException{
-        
-        
-        LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
-        final UserGroupInformation ugi  =  UgiFactory.getUgi(user);
+                                            final Token<?> msToken,
+                                            final Configuration conf,
+                                            String user,
+                                            final Path tokenPath)
+        throws IOException, InterruptedException {
 
-        
+
+        LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
+        final UserGroupInformation ugi = UgiFactory.getUgi(user);
+
+
         ugi.doAs(new PrivilegedExceptionAction<Object>() {
-                     public Object run() throws IOException {
-                         Credentials cred = new Credentials();
-                         cred.addToken(fsToken.getService(), fsToken);
-                         cred.addToken(msToken.getService(), msToken);
-                         cred.writeTokenStorageFile(tokenPath, conf);
-                         return null;
-                     }
-                 });
-        
+            public Object run() throws IOException {
+                Credentials cred = new Credentials();
+                cred.addToken(fsToken.getService(), fsToken);
+                cred.addToken(msToken.getService(), msToken);
+                cred.writeTokenStorageFile(tokenPath, conf);
+                return null;
+            }
+        });
+
     }
-    
+
     private String buildHcatDelegationToken(String user)
-        throws IOException, InterruptedException, MetaException, TException
-    {
+        throws IOException, InterruptedException, MetaException, TException {
         HiveConf c = new HiveConf();
         final HiveMetaStoreClient client = new HiveMetaStoreClient(c);
         LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
         final TokenWrapper twrapper = new TokenWrapper();
         final UserGroupInformation ugi = UgiFactory.getUgi(user);
         String s = ugi.doAs(new PrivilegedExceptionAction<String>() {
-                                public String run()
-                                    throws IOException, MetaException, TException
-                                {
-                                    String u = ugi.getUserName();
-                                    return client.getDelegationToken(u);
-                                }
-                            });
+            public String run()
+                throws IOException, MetaException, TException {
+                String u = ugi.getUserName();
+                return client.getDelegationToken(u);
+            }
+        });
         return s;
     }
 }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/Server.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/Server.java
index f1c68aa..ce0eee1 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/Server.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/Server.java
@@ -39,6 +39,7 @@
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.SecurityContext;
 import javax.ws.rs.core.UriInfo;
+
 import org.apache.commons.exec.ExecuteException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -103,10 +104,14 @@
     private static AppConfig appConf = Main.getAppConfigInstance();
 
     // The SecurityContext set by AuthFilter
-    private @Context SecurityContext theSecurityContext;
+    private
+    @Context
+    SecurityContext theSecurityContext;
 
     // The uri requested
-    private @Context UriInfo theUriInfo;
+    private
+    @Context
+    UriInfo theUriInfo;
 
     private static final Log LOG = LogFactory.getLog(Server.class);
 
@@ -150,8 +155,7 @@
                         @FormParam("group") String group,
                         @FormParam("permissions") String permissions)
         throws NotAuthorizedException, BusyException, BadParam,
-        ExecuteException, IOException
-    {
+        ExecuteException, IOException {
         verifyUser();
         verifyParam(exec, "exec");
 
@@ -168,13 +172,12 @@
     public Response listTables(@PathParam("db") String db,
                                @QueryParam("like") String tablePattern)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
 
         HcatDelegator d = new HcatDelegator(appConf, execService);
-        if (! TempletonUtils.isset(tablePattern))
+        if (!TempletonUtils.isset(tablePattern))
             tablePattern = "*";
         return d.listTables(getUser(), db, tablePattern);
     }
@@ -189,8 +192,7 @@
                                 @PathParam("table") String table,
                                 TableDesc desc)
         throws SimpleWebException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -211,8 +213,7 @@
                                     @PathParam("newTable") String newTable,
                                     TableLikeDesc desc)
         throws SimpleWebException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(existingTable, ":existingTable");
@@ -236,8 +237,7 @@
                               @PathParam("table") String table,
                               @QueryParam("format") String format)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -261,8 +261,7 @@
                               @QueryParam("group") String group,
                               @QueryParam("permissions") String permissions)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -283,8 +282,7 @@
                                 @FormParam("group") String group,
                                 @FormParam("permissions") String permissions)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(oldTable, ":table");
@@ -304,8 +302,7 @@
                                          @PathParam("table") String table,
                                          @PathParam("property") String property)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -324,8 +321,7 @@
     public Response listTableProperties(@PathParam("db") String db,
                                         @PathParam("table") String table)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -345,8 +341,7 @@
                                         @PathParam("property") String property,
                                         TablePropertyDesc desc)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -366,8 +361,7 @@
     public Response listPartitions(@PathParam("db") String db,
                                    @PathParam("table") String table)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -386,8 +380,7 @@
                                   @PathParam("table") String table,
                                   @PathParam("partition") String partition)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -408,8 +401,7 @@
                                     @PathParam("partition") String partition,
                                     PartitionDesc desc)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -432,15 +424,14 @@
                                   @QueryParam("group") String group,
                                   @QueryParam("permissions") String permissions)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
         verifyParam(partition, ":partition");
         HcatDelegator d = new HcatDelegator(appConf, execService);
         return d.dropPartition(getUser(), db, table, partition, ifExists,
-                               group, permissions);
+            group, permissions);
     }
 
     /**
@@ -451,12 +442,11 @@
     @Produces(MediaType.APPLICATION_JSON)
     public Response listDatabases(@QueryParam("like") String dbPattern)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
 
         HcatDelegator d = new HcatDelegator(appConf, execService);
-        if (! TempletonUtils.isset(dbPattern))
+        if (!TempletonUtils.isset(dbPattern))
             dbPattern = "*";
         return d.listDatabases(getUser(), dbPattern);
     }
@@ -470,8 +460,7 @@
     public Response descDatabase(@PathParam("db") String db,
                                  @QueryParam("format") String format)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         HcatDelegator d = new HcatDelegator(appConf, execService);
@@ -487,8 +476,7 @@
     public Response createDatabase(@PathParam("db") String db,
                                    DatabaseDesc desc)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         desc.database = db;
@@ -508,15 +496,14 @@
                                  @QueryParam("group") String group,
                                  @QueryParam("permissions") String permissions)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         if (TempletonUtils.isset(option))
             verifyDdlParam(option, "option");
         HcatDelegator d = new HcatDelegator(appConf, execService);
         return d.dropDatabase(getUser(), db, ifExists, option,
-                              group, permissions);
+            group, permissions);
     }
 
     /**
@@ -529,8 +516,7 @@
     public Response listColumns(@PathParam("db") String db,
                                 @PathParam("table") String table)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -549,8 +535,7 @@
                                @PathParam("table") String table,
                                @PathParam("column") String column)
         throws SimpleWebException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -571,8 +556,7 @@
                                  @PathParam("column") String column,
                                  ColumnDesc desc)
         throws HcatException, NotAuthorizedException, BusyException,
-        BadParam, ExecuteException, IOException
-    {
+        BadParam, ExecuteException, IOException {
         verifyUser();
         verifyDdlParam(db, ":db");
         verifyDdlParam(table, ":table");
@@ -601,8 +585,7 @@
                                           @FormParam("statusdir") String statusdir,
                                           @FormParam("callback") String callback)
         throws NotAuthorizedException, BusyException, BadParam, QueueException,
-        ExecuteException, IOException, InterruptedException
-    {
+        ExecuteException, IOException, InterruptedException {
         verifyUser();
         verifyParam(inputs, "input");
         verifyParam(mapper, "mapper");
@@ -610,8 +593,8 @@
 
         StreamingDelegator d = new StreamingDelegator(appConf);
         return d.run(getUser(), inputs, output, mapper, reducer,
-                     files, defines, cmdenvs, args,
-                     statusdir, callback, getCompletedUrl());
+            files, defines, cmdenvs, args,
+            statusdir, callback, getCompletedUrl());
     }
 
     /**
@@ -629,17 +612,16 @@
                                     @FormParam("statusdir") String statusdir,
                                     @FormParam("callback") String callback)
         throws NotAuthorizedException, BusyException, BadParam, QueueException,
-        ExecuteException, IOException, InterruptedException
-    {
+        ExecuteException, IOException, InterruptedException {
         verifyUser();
         verifyParam(jar, "jar");
         verifyParam(mainClass, "class");
 
         JarDelegator d = new JarDelegator(appConf);
         return d.run(getUser(),
-                     jar, mainClass,
-                     libjars, files, args, defines,
-                     statusdir, callback, getCompletedUrl());
+            jar, mainClass,
+            libjars, files, args, defines,
+            statusdir, callback, getCompletedUrl());
     }
 
     /**
@@ -655,17 +637,16 @@
                            @FormParam("statusdir") String statusdir,
                            @FormParam("callback") String callback)
         throws NotAuthorizedException, BusyException, BadParam, QueueException,
-        ExecuteException, IOException, InterruptedException
-    {
+        ExecuteException, IOException, InterruptedException {
         verifyUser();
         if (execute == null && srcFile == null)
             throw new BadParam("Either execute or file parameter required");
 
         PigDelegator d = new PigDelegator(appConf);
         return d.run(getUser(),
-                     execute, srcFile,
-                     pigArgs, otherFiles,
-                     statusdir, callback, getCompletedUrl());
+            execute, srcFile,
+            pigArgs, otherFiles,
+            statusdir, callback, getCompletedUrl());
     }
 
     /**
@@ -680,15 +661,14 @@
                             @FormParam("statusdir") String statusdir,
                             @FormParam("callback") String callback)
         throws NotAuthorizedException, BusyException, BadParam, QueueException,
-        ExecuteException, IOException, InterruptedException
-    {
+        ExecuteException, IOException, InterruptedException {
         verifyUser();
         if (execute == null && srcFile == null)
             throw new BadParam("Either execute or file parameter required");
 
         HiveDelegator d = new HiveDelegator(appConf);
         return d.run(getUser(), execute, srcFile, defines,
-                     statusdir, callback, getCompletedUrl());
+            statusdir, callback, getCompletedUrl());
     }
 
     /**
@@ -698,8 +678,7 @@
     @Path("queue/{jobid}")
     @Produces({MediaType.APPLICATION_JSON})
     public QueueStatusBean showQueueId(@PathParam("jobid") String jobid)
-        throws NotAuthorizedException, BadParam, IOException
-    {
+        throws NotAuthorizedException, BadParam, IOException {
         verifyUser();
         verifyParam(jobid, ":jobid");
 
@@ -714,8 +693,7 @@
     @Path("queue/{jobid}")
     @Produces({MediaType.APPLICATION_JSON})
     public QueueStatusBean deleteQueueId(@PathParam("jobid") String jobid)
-        throws NotAuthorizedException, BadParam, IOException
-    {
+        throws NotAuthorizedException, BadParam, IOException {
         verifyUser();
         verifyParam(jobid, ":jobid");
 
@@ -730,8 +708,7 @@
     @Path("queue")
     @Produces({MediaType.APPLICATION_JSON})
     public List<String> showQueueList()
-        throws NotAuthorizedException, BadParam, IOException
-    {
+        throws NotAuthorizedException, BadParam, IOException {
         verifyUser();
 
         ListDelegator d = new ListDelegator(appConf);
@@ -745,8 +722,7 @@
     @Path("internal/complete/{jobid}")
     @Produces({MediaType.APPLICATION_JSON})
     public CompleteBean completeJob(@PathParam("jobid") String jobid)
-        throws CallbackFailedException, IOException
-    {
+        throws CallbackFailedException, IOException {
         CompleteDelegator d = new CompleteDelegator(appConf);
         return d.run(jobid);
     }
@@ -755,11 +731,10 @@
      * Verify that we have a valid user.  Throw an exception if invalid.
      */
     public void verifyUser()
-        throws NotAuthorizedException
-    {
+        throws NotAuthorizedException {
         if (getUser() == null) {
             String msg = "No user found.";
-            if (! UserGroupInformation.isSecurityEnabled())
+            if (!UserGroupInformation.isSecurityEnabled())
                 msg += "  Missing " + PseudoAuthenticator.USER_NAME + " parameter.";
             throw new NotAuthorizedException(msg);
         }
@@ -769,8 +744,7 @@
      * Verify that the parameter exists.  Throw an exception if invalid.
      */
     public void verifyParam(String param, String name)
-        throws BadParam
-    {
+        throws BadParam {
         if (param == null)
             throw new BadParam("Missing " + name + " parameter");
     }
@@ -779,8 +753,7 @@
      * Verify that the parameter exists.  Throw an exception if invalid.
      */
     public void verifyParam(List<String> param, String name)
-        throws BadParam
-    {
+        throws BadParam {
         if (param == null || param.isEmpty())
             throw new BadParam("Missing " + name + " parameter");
     }
@@ -794,12 +767,11 @@
      * Bug: This needs to allow for quoted ddl identifiers.
      */
     public void verifyDdlParam(String param, String name)
-        throws BadParam
-    {
+        throws BadParam {
         verifyParam(param, name);
         Matcher m = DDL_ID.matcher(param);
-        if (! m.matches())
-            throw new BadParam("Invalid DDL identifier " + name );
+        if (!m.matches())
+            throw new BadParam("Invalid DDL identifier " + name);
     }
 
     /**
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SimpleExceptionMapper.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SimpleExceptionMapper.java
index ee40fd0..fb83d5a 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SimpleExceptionMapper.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SimpleExceptionMapper.java
@@ -27,8 +27,7 @@
  */
 @Provider
 public class SimpleExceptionMapper
-    implements ExceptionMapper<SimpleWebException>
-{
+    implements ExceptionMapper<SimpleWebException> {
     public Response toResponse(SimpleWebException e) {
         return e.getResponse();
     }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SimpleWebException.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SimpleWebException.java
index 6961d16..3218aa9 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SimpleWebException.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/SimpleWebException.java
@@ -22,6 +22,7 @@
 import java.util.HashMap;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
+
 import org.codehaus.jackson.map.ObjectMapper;
 
 /**
@@ -50,9 +51,8 @@
     }
 
     public static Response buildMessage(int httpCode, Map<String, Object> params,
-                                        String msg)
-    {
-        HashMap<String,Object> err = new HashMap<String,Object>();
+                                        String msg) {
+        HashMap<String, Object> err = new HashMap<String, Object>();
         err.put("error", msg);
         if (params != null)
             err.putAll(params);
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/StatusDelegator.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/StatusDelegator.java
index a7e22f2..5e9c761 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/StatusDelegator.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/StatusDelegator.java
@@ -18,6 +18,7 @@
 package org.apache.hcatalog.templeton;
 
 import java.io.IOException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.mapred.JobID;
@@ -39,15 +40,14 @@
     }
 
     public QueueStatusBean run(String user, String id)
-        throws NotAuthorizedException, BadParam, IOException
-    {
+        throws NotAuthorizedException, BadParam, IOException {
         UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
         TempletonJobTracker tracker = null;
         JobState state = null;
         try {
             tracker = new TempletonJobTracker(ugi,
-                                              JobTracker.getAddress(appConf),
-                                              appConf);
+                JobTracker.getAddress(appConf),
+                appConf);
             JobID jobid = StatusDelegator.StringToJobID(id);
             if (jobid == null)
                 throw new BadParam("Invalid jobid: " + id);
@@ -67,8 +67,7 @@
                                              JobID jobid,
                                              String childid,
                                              JobState state)
-        throws BadParam, IOException
-    {
+        throws BadParam, IOException {
         JobID bestid = jobid;
         if (childid != null)
             bestid = StatusDelegator.StringToJobID(childid);
@@ -94,8 +93,7 @@
     public static QueueStatusBean makeStatus(TempletonJobTracker tracker,
                                              JobID jobid,
                                              JobState state)
-        throws BadParam, IOException
-    {
+        throws BadParam, IOException {
         return makeStatus(tracker, jobid, state.getChildId(), state);
     }
 
@@ -103,8 +101,7 @@
      * A version of JobID.forName with our app specific error handling.
      */
     public static JobID StringToJobID(String id)
-        throws BadParam
-    {
+        throws BadParam {
         try {
             return JobID.forName(id);
         } catch (IllegalArgumentException e) {
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/StreamingDelegator.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/StreamingDelegator.java
index 28e549c..6c44ab4 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/StreamingDelegator.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/StreamingDelegator.java
@@ -20,11 +20,12 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+
 import org.apache.commons.exec.ExecuteException;
 
 /**
  * Submit a streaming job to the MapReduce queue.  Really just a front
-   end to the JarDelegator.
+ * end to the JarDelegator.
  *
  * This is the backend of the mapreduce/streaming web service.
  */
@@ -43,16 +44,15 @@
                            String callback,
                            String completedUrl)
         throws NotAuthorizedException, BadParam, BusyException, QueueException,
-        ExecuteException, IOException, InterruptedException
-    {
+        ExecuteException, IOException, InterruptedException {
         List<String> args = makeArgs(inputs, output, mapper, reducer,
-                                     files, defines, cmdenvs, jarArgs);
+            files, defines, cmdenvs, jarArgs);
 
         JarDelegator d = new JarDelegator(appConf);
         return d.run(user,
-                     appConf.streamingJar(), null,
-                     null, null, args, defines,
-                     statusdir, callback, completedUrl);
+            appConf.streamingJar(), null,
+            null, null, args, defines,
+            statusdir, callback, completedUrl);
     }
 
     private List<String> makeArgs(List<String> inputs,
@@ -62,8 +62,7 @@
                                   List<String> files,
                                   List<String> defines,
                                   List<String> cmdenvs,
-                                  List<String> jarArgs)
-    {
+                                  List<String> jarArgs) {
         ArrayList<String> args = new ArrayList<String>();
         for (String input : inputs) {
             args.add("-input");
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/TableDesc.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/TableDesc.java
index 28cf046..9636f6d 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/TableDesc.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/TableDesc.java
@@ -40,7 +40,8 @@
     /**
      * Create a new TableDesc
      */
-    public TableDesc() {}
+    public TableDesc() {
+    }
 
     public String toString() {
         return String.format("TableDesc(table=%s, columns=%s)", table, columns);
@@ -49,18 +50,18 @@
     public boolean equals(Object o) {
         if (this == o)
             return true;
-        if (! (o instanceof TableDesc))
+        if (!(o instanceof TableDesc))
             return false;
         TableDesc that = (TableDesc) o;
-        return xequals(this.external,        that.external)
-            && xequals(this.ifNotExists,     that.ifNotExists)
-            && xequals(this.table,           that.table)
-            && xequals(this.comment,         that.comment)
-            && xequals(this.columns,         that.columns)
-            && xequals(this.partitionedBy,   that.partitionedBy)
-            && xequals(this.clusteredBy,     that.clusteredBy)
-            && xequals(this.format,          that.format)
-            && xequals(this.location,        that.location)
+        return xequals(this.external, that.external)
+            && xequals(this.ifNotExists, that.ifNotExists)
+            && xequals(this.table, that.table)
+            && xequals(this.comment, that.comment)
+            && xequals(this.columns, that.columns)
+            && xequals(this.partitionedBy, that.partitionedBy)
+            && xequals(this.clusteredBy, that.clusteredBy)
+            && xequals(this.format, that.format)
+            && xequals(this.location, that.location)
             && xequals(this.tableProperties, that.tableProperties)
             && super.equals(that)
             ;
@@ -75,7 +76,8 @@
         public List<ClusterSortOrderDesc> sortedBy;
         public int numberOfBuckets;
 
-        public ClusteredByDesc() {}
+        public ClusteredByDesc() {
+        }
 
         public String toString() {
             String fmt
@@ -86,11 +88,11 @@
         public boolean equals(Object o) {
             if (this == o)
                 return true;
-            if (! (o instanceof ClusteredByDesc))
+            if (!(o instanceof ClusteredByDesc))
                 return false;
             ClusteredByDesc that = (ClusteredByDesc) o;
-            return xequals(this.columnNames,     that.columnNames)
-                && xequals(this.sortedBy,        that.sortedBy)
+            return xequals(this.columnNames, that.columnNames)
+                && xequals(this.sortedBy, that.sortedBy)
                 && xequals(this.numberOfBuckets, that.numberOfBuckets)
                 ;
         }
@@ -104,7 +106,8 @@
         public String columnName;
         public SortDirectionDesc order;
 
-        public ClusterSortOrderDesc() {}
+        public ClusterSortOrderDesc() {
+        }
 
         public ClusterSortOrderDesc(String columnName, SortDirectionDesc order) {
             this.columnName = columnName;
@@ -114,17 +117,17 @@
         public String toString() {
             return String
                 .format("ClusterSortOrderDesc(columnName=%s, order=%s)",
-                        columnName, order);
+                    columnName, order);
         }
 
         public boolean equals(Object o) {
             if (this == o)
                 return true;
-            if (! (o instanceof ClusterSortOrderDesc))
+            if (!(o instanceof ClusterSortOrderDesc))
                 return false;
             ClusterSortOrderDesc that = (ClusterSortOrderDesc) o;
-            return xequals(this.columnName,     that.columnName)
-                && xequals(this.order,          that.order)
+            return xequals(this.columnName, that.columnName)
+                && xequals(this.order, that.order)
                 ;
         }
     }
@@ -146,17 +149,18 @@
         public String storedAs;
         public StoredByDesc storedBy;
 
-        public StorageFormatDesc() {}
+        public StorageFormatDesc() {
+        }
 
         public boolean equals(Object o) {
             if (this == o)
                 return true;
-            if (! (o instanceof StorageFormatDesc))
+            if (!(o instanceof StorageFormatDesc))
                 return false;
             StorageFormatDesc that = (StorageFormatDesc) o;
-            return xequals(this.rowFormat,      that.rowFormat)
-                && xequals(this.storedAs,       that.storedAs)
-                && xequals(this.storedBy,       that.storedBy)
+            return xequals(this.rowFormat, that.rowFormat)
+                && xequals(this.storedAs, that.storedAs)
+                && xequals(this.storedBy, that.storedBy)
                 ;
         }
     }
@@ -172,20 +176,21 @@
         public String linesTerminatedBy;
         public SerdeDesc serde;
 
-        public RowFormatDesc() {}
+        public RowFormatDesc() {
+        }
 
         public boolean equals(Object o) {
             if (this == o)
                 return true;
-            if (! (o instanceof RowFormatDesc))
+            if (!(o instanceof RowFormatDesc))
                 return false;
             RowFormatDesc that = (RowFormatDesc) o;
-            return xequals(this.fieldsTerminatedBy,     that.fieldsTerminatedBy)
+            return xequals(this.fieldsTerminatedBy, that.fieldsTerminatedBy)
                 && xequals(this.collectionItemsTerminatedBy,
-                           that.collectionItemsTerminatedBy)
-                && xequals(this.mapKeysTerminatedBy,    that.mapKeysTerminatedBy)
-                && xequals(this.linesTerminatedBy,      that.linesTerminatedBy)
-                && xequals(this.serde,                  that.serde)
+                    that.collectionItemsTerminatedBy)
+                && xequals(this.mapKeysTerminatedBy, that.mapKeysTerminatedBy)
+                && xequals(this.linesTerminatedBy, that.linesTerminatedBy)
+                && xequals(this.serde, that.serde)
                 ;
         }
     }
@@ -198,16 +203,17 @@
         public String name;
         public Map<String, String> properties;
 
-        public SerdeDesc() {}
+        public SerdeDesc() {
+        }
 
         public boolean equals(Object o) {
             if (this == o)
                 return true;
-            if (! (o instanceof SerdeDesc))
+            if (!(o instanceof SerdeDesc))
                 return false;
             SerdeDesc that = (SerdeDesc) o;
-            return xequals(this.name,           that.name)
-                && xequals(this.properties,     that.properties)
+            return xequals(this.name, that.name)
+                && xequals(this.properties, that.properties)
                 ;
         }
     }
@@ -220,16 +226,17 @@
         public String className;
         public Map<String, String> properties;
 
-        public StoredByDesc() {}
+        public StoredByDesc() {
+        }
 
         public boolean equals(Object o) {
             if (this == o)
                 return true;
-            if (! (o instanceof StoredByDesc))
+            if (!(o instanceof StoredByDesc))
                 return false;
             StoredByDesc that = (StoredByDesc) o;
-            return xequals(this.className,      that.className)
-                && xequals(this.properties,     that.properties)
+            return xequals(this.className, that.className)
+                && xequals(this.properties, that.properties)
                 ;
         }
     }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/TableLikeDesc.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/TableLikeDesc.java
index 689107b..cb09e54 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/TableLikeDesc.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/TableLikeDesc.java
@@ -30,10 +30,11 @@
     public String existingTable;
     public String newTable;
 
-    public TableLikeDesc() {}
+    public TableLikeDesc() {
+    }
 
     public String toString() {
         return String.format("TableLikeDesc(existingTable=%s, newTable=%s, location=%s",
-                             existingTable, newTable, location);
+            existingTable, newTable, location);
     }
 }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/UgiFactory.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/UgiFactory.java
index 886163c..5763ba3 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/UgiFactory.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/UgiFactory.java
@@ -24,15 +24,15 @@
 
 public class UgiFactory {
     private static ConcurrentHashMap<String, UserGroupInformation> userUgiMap =
-            new ConcurrentHashMap<String, UserGroupInformation>();
-    
-    static UserGroupInformation getUgi(String user) throws IOException{
+        new ConcurrentHashMap<String, UserGroupInformation>();
+
+    static UserGroupInformation getUgi(String user) throws IOException {
         UserGroupInformation ugi = userUgiMap.get(user);
-        if(ugi == null){
+        if (ugi == null) {
             //create new ugi and add to map
-            final UserGroupInformation newUgi = 
-                    UserGroupInformation.createProxyUser(user,
-                            UserGroupInformation.getLoginUser());
+            final UserGroupInformation newUgi =
+                UserGroupInformation.createProxyUser(user,
+                    UserGroupInformation.getLoginUser());
 
             //if another thread adds an entry before the check in this one
             // the one created here will not be added.
@@ -40,10 +40,10 @@
 
             //use the UGI object that got added
             return userUgiMap.get(user);
-            
+
         }
         return ugi;
     }
-    
-    
+
+
 }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/WadlConfig.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/WadlConfig.java
index 3dc2ab1..1e60efa 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/WadlConfig.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/WadlConfig.java
@@ -26,15 +26,15 @@
 /**
  * Simple class that incorporates javadoc information into the
  * wadl produced by jersey.
- * 
+ *
  */
 public class WadlConfig extends WadlGeneratorConfig {
-    
+
     @Override
     public List<WadlGeneratorDescription> configure() {
-        return generator( WadlGeneratorResourceDocSupport.class ) 
-            .prop( "resourceDocStream", "resourcedoc.xml" ) 
-        .descriptions();
+        return generator(WadlGeneratorResourceDocSupport.class)
+            .prop("resourceDocStream", "resourcedoc.xml")
+            .descriptions();
     }
- 
+
 }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/HDFSStorage.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/HDFSStorage.java
index 4c21b58..6827fca 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/HDFSStorage.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/HDFSStorage.java
@@ -75,7 +75,7 @@
             out.write(val);
         } catch (IOException e) {
             LOG.info("Couldn't write to " + getPath(type) + "/" + id + ": "
-                     + e.getMessage());
+                + e.getMessage());
         } finally {
             try {
                 out.flush();
@@ -90,13 +90,12 @@
     public String getField(Type type, String id, String key) {
         BufferedReader in = null;
         try {
-            in = new BufferedReader(new InputStreamReader
-                                    (fs.open(new Path(getPath(type) + "/" +
-                                                      id + "/" + key))));
+            in = new BufferedReader(new InputStreamReader(fs.open(new Path(getPath(type) + "/" +
+                id + "/" + key))));
             String line = null;
             String val = "";
             while ((line = in.readLine()) != null) {
-                if (! val.equals("")) {
+                if (!val.equals("")) {
                     val += "\n";
                 }
                 val += line;
@@ -104,7 +103,7 @@
             return val;
         } catch (IOException e) {
             LOG.trace("Couldn't find " + getPath(type) + "/" + id + "/" + key
-                      + ": " + e.getMessage());
+                + ": " + e.getMessage());
         } finally {
             try {
                 in.close();
@@ -120,10 +119,8 @@
         HashMap<String, String> map = new HashMap<String, String>();
         BufferedReader in = null;
         try {
-            for (FileStatus status : fs.listStatus
-                     (new Path(getPath(type) + "/" + id))) {
-                in = new BufferedReader(new InputStreamReader
-                                        (fs.open(status.getPath())));
+            for (FileStatus status : fs.listStatus(new Path(getPath(type) + "/" + id))) {
+                in = new BufferedReader(new InputStreamReader(fs.open(status.getPath())));
                 String line = null;
                 String val = "";
                 while ((line = in.readLine()) != null) {
@@ -152,7 +149,7 @@
             fs.delete(new Path(getPath(type) + "/" + id), true);
         } catch (IOException e) {
             throw new NotFoundException("Node " + id + " was not found: " +
-                                        e.getMessage());
+                e.getMessage());
         }
         return false;
     }
@@ -160,7 +157,7 @@
     @Override
     public List<String> getAll() {
         ArrayList<String> allNodes = new ArrayList<String>();
-        for (Type type: Type.values()) {
+        for (Type type : Type.values()) {
             allNodes.addAll(getAllForType(type));
         }
         return allNodes;
@@ -189,7 +186,7 @@
             }
         } catch (Exception e) {
             LOG.trace("Couldn't find children for key " + key + ": " +
-                      e.getMessage());
+                e.getMessage());
         }
         return allNodes;
     }
@@ -200,7 +197,7 @@
         HashMap<String, String> map = new HashMap<String, String>();
         try {
             for (FileStatus status :
-                     fs.listStatus(new Path(getPath(type)))) {
+                fs.listStatus(new Path(getPath(type)))) {
                 map = (HashMap<String, String>)
                     getFields(type, status.getPath().getName());
                 if (map.get(key).equals(value)) {
@@ -209,7 +206,7 @@
             }
         } catch (Exception e) {
             LOG.trace("Couldn't find children for key " + key + ": " +
-                      e.getMessage());
+                e.getMessage());
         }
         return allNodes;
     }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/JobState.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/JobState.java
index 4296e36..0b884f7 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/JobState.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/JobState.java
@@ -43,16 +43,14 @@
     private Configuration config = null;
 
     public JobState(String id, Configuration conf)
-        throws IOException
-    {
+        throws IOException {
         this.id = id;
         config = conf;
         storage = getStorage(conf);
     }
 
     public void delete()
-        throws IOException
-    {
+        throws IOException {
         try {
             storage.delete(type, id);
         } catch (Exception e) {
@@ -114,28 +112,26 @@
     /**
      * The percent complete of a job
      */
-    public String  getPercentComplete()
-        throws IOException
-    {
+    public String getPercentComplete()
+        throws IOException {
         return getField("percentComplete");
     }
+
     public void setPercentComplete(String percent)
-        throws IOException
-    {
+        throws IOException {
         setField("percentComplete", percent);
     }
 
     /**
      * The child id of TempletonControllerJob
      */
-    public String  getChildId()
-        throws IOException
-    {
+    public String getChildId()
+        throws IOException {
         return getField("childid");
     }
+
     public void setChildId(String childid)
-        throws IOException
-    {
+        throws IOException {
         setField("childid", childid);
     }
 
@@ -200,13 +196,12 @@
      * The system exit value of the job.
      */
     public Long getExitValue()
-        throws IOException
-    {
+        throws IOException {
         return getLongField("exitValue");
     }
+
     public void setExitValue(long exitValue)
-        throws IOException
-    {
+        throws IOException {
         setLongField("exitValue", exitValue);
     }
 
@@ -214,13 +209,12 @@
      * When this job was created.
      */
     public Long getCreated()
-        throws IOException
-    {
+        throws IOException {
         return getLongField("created");
     }
+
     public void setCreated(long created)
-        throws IOException
-    {
+        throws IOException {
         setLongField("created", created);
     }
 
@@ -228,13 +222,12 @@
      * The user who started this job.
      */
     public String getUser()
-        throws IOException
-    {
+        throws IOException {
         return getField("user");
     }
+
     public void setUser(String user)
-        throws IOException
-    {
+        throws IOException {
         setField("user", user);
     }
 
@@ -242,13 +235,12 @@
      * The url callback
      */
     public String getCallback()
-        throws IOException
-    {
+        throws IOException {
         return getField("callback");
     }
+
     public void setCallback(String callback)
-        throws IOException
-    {
+        throws IOException {
         setField("callback", callback);
     }
 
@@ -256,13 +248,12 @@
      * The status of a job once it is completed.
      */
     public String getCompleteStatus()
-        throws IOException
-    {
+        throws IOException {
         return getField("completed");
     }
+
     public void setCompleteStatus(String complete)
-        throws IOException
-    {
+        throws IOException {
         setField("completed", complete);
     }
 
@@ -270,13 +261,12 @@
      * The time when the callback was sent.
      */
     public Long getNotifiedTime()
-        throws IOException
-    {
+        throws IOException {
         return getLongField("notified");
     }
+
     public void setNotifiedTime(long notified)
-        throws IOException
-    {
+        throws IOException {
         setLongField("notified", notified);
     }
 
@@ -288,8 +278,7 @@
      * Fetch an integer field from the store.
      */
     public Long getLongField(String name)
-        throws IOException
-    {
+        throws IOException {
         String s = storage.getField(type, id, name);
         if (s == null)
             return null;
@@ -297,7 +286,7 @@
             try {
                 return new Long(s);
             } catch (NumberFormatException e) {
-                LOG.error("templeton: bug " + name + " " + s + " : "+ e);
+                LOG.error("templeton: bug " + name + " " + s + " : " + e);
                 return null;
             }
         }
@@ -307,8 +296,7 @@
      * Store a String field from the store.
      */
     public void setField(String name, String val)
-        throws IOException
-    {
+        throws IOException {
         try {
             storage.saveField(type, id, name, val);
         } catch (NotFoundException ne) {
@@ -317,8 +305,7 @@
     }
 
     public String getField(String name)
-        throws IOException
-    {
+        throws IOException {
         return storage.getField(type, id, name);
     }
 
@@ -330,13 +317,12 @@
      * @throws IOException
      */
     public void setLongField(String name, long val)
-        throws IOException
-    {
+        throws IOException {
         try {
             storage.saveField(type, id, name, String.valueOf(val));
         } catch (NotFoundException ne) {
             throw new IOException("Job " + id + " was not found: " +
-                                  ne.getMessage());
+                ne.getMessage());
         }
     }
 
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/JobStateTracker.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/JobStateTracker.java
index 2055e10..78474cd 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/JobStateTracker.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/JobStateTracker.java
@@ -51,7 +51,7 @@
      *
      */
     public JobStateTracker(String node, ZooKeeper zk, boolean nodeIsTracker,
-            String job_trackingpath) {
+                           String job_trackingpath) {
         this.zk = zk;
         if (nodeIsTracker) {
             trackingnode = node;
@@ -65,13 +65,12 @@
      * Create the parent znode for this job state.
      */
     public void create()
-        throws IOException
-    {
+        throws IOException {
         String[] paths = ZooKeeperStorage.getPaths(job_trackingroot);
         for (String znode : paths) {
             try {
                 zk.create(znode, new byte[0],
-                          Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+                    Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
             } catch (KeeperException.NodeExistsException e) {
             } catch (Exception e) {
                 throw new IOException("Unable to create parent nodes");
@@ -79,15 +78,14 @@
         }
         try {
             trackingnode = zk.create(makeTrackingZnode(), jobid.getBytes(),
-                    Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL);
+                Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL);
         } catch (Exception e) {
             throw new IOException("Unable to create " + makeTrackingZnode());
         }
     }
 
     public void delete()
-        throws IOException
-    {
+        throws IOException {
         try {
             zk.delete(makeTrackingJobZnode(trackingnode), -1);
         } catch (Exception e) {
@@ -103,7 +101,7 @@
     public String getJobID() throws IOException {
         try {
             return new String(zk.getData(makeTrackingJobZnode(trackingnode),
-                    false, new Stat()));
+                false, new Stat()));
         } catch (KeeperException e) {
             // It was deleted during the transaction
             throw new IOException("Node already deleted " + trackingnode);
@@ -131,11 +129,11 @@
      * expired.
      */
     public static List<String> getTrackingJobs(Configuration conf, ZooKeeper zk)
-            throws IOException {
+        throws IOException {
         ArrayList<String> jobs = new ArrayList<String>();
         try {
             for (String myid : zk.getChildren(
-                    conf.get(TempletonStorage.STORAGE_ROOT)
+                conf.get(TempletonStorage.STORAGE_ROOT)
                     + ZooKeeperStorage.TRACKINGDIR, false)) {
                 jobs.add(myid);
             }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/NullRecordReader.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/NullRecordReader.java
index 135c4fe..723d577 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/NullRecordReader.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/NullRecordReader.java
@@ -18,6 +18,7 @@
 package org.apache.hcatalog.templeton.tool;
 
 import java.io.IOException;
+
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.RecordReader;
@@ -27,15 +28,15 @@
  * An empty record reader.
  */
 public class NullRecordReader
-    extends RecordReader<NullWritable, NullWritable>
-{
+    extends RecordReader<NullWritable, NullWritable> {
     @Override
     public void initialize(InputSplit genericSplit, TaskAttemptContext context)
-        throws IOException
-    {}
+        throws IOException {
+    }
 
     @Override
-    public void close() throws IOException {}
+    public void close() throws IOException {
+    }
 
     @Override
     public NullWritable getCurrentKey() {
@@ -48,7 +49,9 @@
     }
 
     @Override
-    public float getProgress() { return 1.0f; }
+    public float getProgress() {
+        return 1.0f;
+    }
 
     @Override
     public boolean nextKeyValue() throws IOException {
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/SingleInputFormat.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/SingleInputFormat.java
index a190a70..b461500 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/SingleInputFormat.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/SingleInputFormat.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -31,21 +32,18 @@
  * An empty InputFormat.
  */
 public class SingleInputFormat
-    extends InputFormat<NullWritable, NullWritable>
-{
+    extends InputFormat<NullWritable, NullWritable> {
     public List<InputSplit> getSplits(JobContext job)
-        throws IOException
-    {
+        throws IOException {
         List<InputSplit> res = new ArrayList<InputSplit>();
         res.add(new NullSplit());
         return res;
     }
 
     public RecordReader<NullWritable, NullWritable>
-        createRecordReader(InputSplit split,
-                           TaskAttemptContext context)
-        throws IOException
-    {
+    createRecordReader(InputSplit split,
+                       TaskAttemptContext context)
+        throws IOException {
         return new NullRecordReader();
     }
 }
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TempletonControllerJob.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TempletonControllerJob.java
index d7c4c8b..341ec5c 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TempletonControllerJob.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TempletonControllerJob.java
@@ -66,32 +66,30 @@
  *   in hdfs files.
  */
 public class TempletonControllerJob extends Configured implements Tool {
-    static enum ControllerCounters { SIMPLE_COUNTER };
+    static enum ControllerCounters {SIMPLE_COUNTER}
 
-    public static final String COPY_NAME      = "templeton.copy";
+    public static final String COPY_NAME = "templeton.copy";
     public static final String STATUSDIR_NAME = "templeton.statusdir";
-    public static final String JAR_ARGS_NAME  = "templeton.args";
+    public static final String JAR_ARGS_NAME = "templeton.args";
     public static final String OVERRIDE_CLASSPATH = "templeton.override-classpath";
 
-    public static final String STDOUT_FNAME  = "stdout";
-    public static final String STDERR_FNAME  = "stderr";
-    public static final String EXIT_FNAME    = "exit";
+    public static final String STDOUT_FNAME = "stdout";
+    public static final String STDERR_FNAME = "stderr";
+    public static final String EXIT_FNAME = "exit";
 
     public static final int WATCHER_TIMEOUT_SECS = 10;
-    public static final int KEEP_ALIVE_MSEC      = 60 * 1000;
+    public static final int KEEP_ALIVE_MSEC = 60 * 1000;
 
     private static TrivialExecService execService = TrivialExecService.getInstance();
 
     private static final Log LOG = LogFactory.getLog(TempletonControllerJob.class);
-    
-    
+
+
     public static class LaunchMapper
-        extends Mapper<NullWritable, NullWritable, Text, Text>
-    {
+        extends Mapper<NullWritable, NullWritable, Text, Text> {
         protected Process startJob(Context context, String user,
                                    String overrideClasspath)
-            throws IOException, InterruptedException
-        {
+            throws IOException, InterruptedException {
             Configuration conf = context.getConfiguration();
             copyLocal(COPY_NAME, conf);
             String[] jarArgs
@@ -100,18 +100,17 @@
             ArrayList<String> removeEnv = new ArrayList<String>();
             removeEnv.add("HADOOP_ROOT_LOGGER");
             Map<String, String> env = TempletonUtils.hadoopUserEnv(user,
-                                                                   overrideClasspath);
+                overrideClasspath);
             List<String> jarArgsList = new LinkedList<String>(Arrays.asList(jarArgs));
             String tokenFile = System.getenv("HADOOP_TOKEN_FILE_LOCATION");
-            if(tokenFile != null){
-                jarArgsList.add(1, "-Dmapreduce.job.credentials.binary=" + tokenFile );
+            if (tokenFile != null) {
+                jarArgsList.add(1, "-Dmapreduce.job.credentials.binary=" + tokenFile);
             }
             return execService.run(jarArgsList, removeEnv, env);
         }
 
         private void copyLocal(String var, Configuration conf)
-            throws IOException
-        {
+            throws IOException {
             String[] filenames = TempletonUtils.decodeArray(conf.get(var));
             if (filenames != null) {
                 for (String filename : filenames) {
@@ -126,29 +125,28 @@
 
         @Override
         public void run(Context context)
-            throws IOException, InterruptedException
-        {
+            throws IOException, InterruptedException {
 
             Configuration conf = context.getConfiguration();
 
             Process proc = startJob(context,
-                                    conf.get("user.name"),
-                                    conf.get(OVERRIDE_CLASSPATH));
+                conf.get("user.name"),
+                conf.get(OVERRIDE_CLASSPATH));
 
             String statusdir = conf.get(STATUSDIR_NAME);
             Counter cnt = context.getCounter(ControllerCounters.SIMPLE_COUNTER);
 
             ExecutorService pool = Executors.newCachedThreadPool();
             executeWatcher(pool, conf, context.getJobID(),
-                           proc.getInputStream(), statusdir, STDOUT_FNAME);
+                proc.getInputStream(), statusdir, STDOUT_FNAME);
             executeWatcher(pool, conf, context.getJobID(),
-                           proc.getErrorStream(), statusdir, STDERR_FNAME);
+                proc.getErrorStream(), statusdir, STDERR_FNAME);
             KeepAlive keepAlive = startCounterKeepAlive(pool, cnt);
 
             proc.waitFor();
             keepAlive.sendReport = false;
             pool.shutdown();
-            if (! pool.awaitTermination(WATCHER_TIMEOUT_SECS, TimeUnit.SECONDS))
+            if (!pool.awaitTermination(WATCHER_TIMEOUT_SECS, TimeUnit.SECONDS))
                 pool.shutdownNow();
 
             writeExitValue(conf, proc.exitValue(), statusdir);
@@ -159,7 +157,7 @@
 
             if (proc.exitValue() != 0)
                 System.err.println("templeton: job failed with exit code "
-                                   + proc.exitValue());
+                    + proc.exitValue());
             else
                 System.err.println("templeton: job completed with exit code 0");
         }
@@ -167,29 +165,26 @@
         private void executeWatcher(ExecutorService pool, Configuration conf,
                                     JobID jobid, InputStream in, String statusdir,
                                     String name)
-            throws IOException
-        {
+            throws IOException {
             Watcher w = new Watcher(conf, jobid, in, statusdir, name);
             pool.execute(w);
         }
 
         private KeepAlive startCounterKeepAlive(ExecutorService pool, Counter cnt)
-            throws IOException
-        {
+            throws IOException {
             KeepAlive k = new KeepAlive(cnt);
             pool.execute(k);
             return k;
         }
 
         private void writeExitValue(Configuration conf, int exitValue, String statusdir)
-            throws IOException
-        {
+            throws IOException {
             if (TempletonUtils.isset(statusdir)) {
                 Path p = new Path(statusdir, EXIT_FNAME);
                 FileSystem fs = p.getFileSystem(conf);
                 OutputStream out = fs.create(p);
                 System.err.println("templeton: Writing exit value "
-                                   + exitValue + " to " + p);
+                    + exitValue + " to " + p);
                 PrintWriter writer = new PrintWriter(out);
                 writer.println(exitValue);
                 writer.close();
@@ -205,8 +200,7 @@
 
         public Watcher(Configuration conf, JobID jobid, InputStream in,
                        String statusdir, String name)
-            throws IOException
-        {
+            throws IOException {
             this.conf = conf;
             this.jobid = jobid;
             this.in = in;
@@ -266,8 +260,7 @@
         private Counter cnt;
         public boolean sendReport;
 
-        public KeepAlive(Counter cnt)
-        {
+        public KeepAlive(Counter cnt) {
             this.cnt = cnt;
             this.sendReport = true;
         }
@@ -286,6 +279,7 @@
     }
 
     private JobID submittedJobId;
+
     public String getSubmittedId() {
         if (submittedJobId == null)
             return null;
@@ -298,8 +292,7 @@
      */
     @Override
     public int run(String[] args)
-        throws IOException, InterruptedException, ClassNotFoundException
-    {
+        throws IOException, InterruptedException, ClassNotFoundException {
         Configuration conf = getConf();
         conf.set(JAR_ARGS_NAME, TempletonUtils.encodeArray(args));
         conf.set("user.name", UserGroupInformation.getCurrentUser().getShortUserName());
@@ -314,9 +307,9 @@
             = new NullOutputFormat<NullWritable, NullWritable>();
         job.setOutputFormatClass(of.getClass());
         job.setNumReduceTasks(0);
-        
+
         JobClient jc = new JobClient(new JobConf(job.getConfiguration()));
-        
+
         Token<DelegationTokenIdentifier> mrdt = jc.getDelegationToken(new Text("mr token"));
         job.getCredentials().addToken(new Text("mr token"), mrdt);
         job.submit();
@@ -326,7 +319,7 @@
         return 0;
     }
 
-    
+
     public static void main(String[] args) throws Exception {
         int ret = ToolRunner.run(new TempletonControllerJob(), args);
         if (ret != 0)
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TempletonUtils.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TempletonUtils.java
index 8213af7..67bd8fd 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TempletonUtils.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TempletonUtils.java
@@ -30,6 +30,7 @@
 import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -65,14 +66,14 @@
      * Is the object non-empty?
      */
     public static <T> boolean isset(Collection<T> col) {
-        return (col != null) && (! col.isEmpty());
+        return (col != null) && (!col.isEmpty());
     }
 
     /**
      * Is the object non-empty?
      */
     public static <K, V> boolean isset(Map<K, V> col) {
-        return (col != null) && (! col.isEmpty());
+        return (col != null) && (!col.isEmpty());
     }
 
 
@@ -160,8 +161,7 @@
     public static String[] hadoopFsListAsArray(String files, Configuration conf,
                                                String user)
         throws URISyntaxException, FileNotFoundException, IOException,
-        InterruptedException
-    {
+        InterruptedException {
         if (files == null || conf == null) {
             return null;
         }
@@ -177,8 +177,7 @@
     public static String hadoopFsListAsString(String files, Configuration conf,
                                               String user)
         throws URISyntaxException, FileNotFoundException, IOException,
-        InterruptedException
-    {
+        InterruptedException {
         if (files == null || conf == null) {
             return null;
         }
@@ -187,8 +186,7 @@
 
     public static String hadoopFsFilename(String fname, Configuration conf, String user)
         throws URISyntaxException, FileNotFoundException, IOException,
-        InterruptedException
-    {
+        InterruptedException {
         Path p = hadoopFsPath(fname, conf, user);
         if (p == null)
             return null;
@@ -201,8 +199,8 @@
      */
     public static boolean hadoopFsIsMissing(FileSystem fs, Path p) {
         try {
-            return ! fs.exists(p);
-        } catch(Throwable t) {
+            return !fs.exists(p);
+        } catch (Throwable t) {
             // Got an error, might be there anyway due to a
             // permissions problem.
             return false;
@@ -211,8 +209,7 @@
 
     public static Path hadoopFsPath(String fname, Configuration conf, String user)
         throws URISyntaxException, FileNotFoundException, IOException,
-        InterruptedException
-    {
+        InterruptedException {
         if (fname == null || conf == null) {
             return null;
         }
@@ -231,8 +228,7 @@
      * GET the given url.  Returns the number of bytes received.
      */
     public static int fetchUrl(URL url)
-        throws IOException
-    {
+        throws IOException {
         URLConnection cnx = url.openConnection();
         InputStream in = cnx.getInputStream();
 
@@ -249,8 +245,7 @@
      * Set the environment variables to specify the hadoop user.
      */
     public static Map<String, String> hadoopUserEnv(String user,
-                                                    String overrideClasspath)
-    {
+                                                    String overrideClasspath) {
         HashMap<String, String> env = new HashMap<String, String>();
         env.put("HADOOP_USER_NAME", user);
 
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TrivialExecService.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TrivialExecService.java
index 20a8f2b..36759b3 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TrivialExecService.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/TrivialExecService.java
@@ -39,11 +39,10 @@
 
     public Process run(List<String> cmd, List<String> removeEnv,
                        Map<String, String> environmentVariables)
-        throws IOException
-    {
+        throws IOException {
         System.err.println("templeton: starting " + cmd);
-        System.err.print("With environment variables: " );
-        for(Map.Entry<String, String> keyVal : environmentVariables.entrySet()){
+        System.err.print("With environment variables: ");
+        for (Map.Entry<String, String> keyVal : environmentVariables.entrySet()) {
             System.err.println(keyVal.getKey() + "=" + keyVal.getValue());
         }
         ProcessBuilder pb = new ProcessBuilder(cmd);
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/ZooKeeperCleanup.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/ZooKeeperCleanup.java
index 03407c4..5e4f452 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/ZooKeeperCleanup.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/ZooKeeperCleanup.java
@@ -77,7 +77,7 @@
     }
 
     public static void startInstance(Configuration appConf) throws IOException {
-        if (! isRunning) {
+        if (!isRunning) {
             getInstance(appConf).start();
         }
     }
@@ -122,7 +122,7 @@
 
                 long sleepMillis = (long) (Math.random() * interval);
                 LOG.info("Next execution: " + new Date(new Date().getTime()
-                                                       + sleepMillis));
+                    + sleepMillis));
                 Thread.sleep(sleepMillis);
 
             } catch (Exception e) {
@@ -157,8 +157,8 @@
         JobState state = null;
         try {
             JobStateTracker tracker = new JobStateTracker(node, zk, true,
-                    appConf.get(TempletonStorage.STORAGE_ROOT +
-                            ZooKeeperStorage.TRACKINGDIR));
+                appConf.get(TempletonStorage.STORAGE_ROOT +
+                    ZooKeeperStorage.TRACKINGDIR));
             long now = new Date().getTime();
             state = new JobState(tracker.getJobID(), appConf);
 
diff --git a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/ZooKeeperStorage.java b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/ZooKeeperStorage.java
index 2183808..6bfe460 100644
--- a/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/ZooKeeperStorage.java
+++ b/webhcat/svr/src/main/java/org/apache/hcatalog/templeton/tool/ZooKeeperStorage.java
@@ -65,25 +65,23 @@
      * Open a ZooKeeper connection for the JobState.
      */
     public static ZooKeeper zkOpen(String zkHosts, int zkSessionTimeout)
-        throws IOException
-    {
+        throws IOException {
         return new ZooKeeper(zkHosts,
-                             zkSessionTimeout,
-                             new Watcher() {
-                                 @Override
-                                 synchronized public void process(WatchedEvent event) {
-                                 }
-                             });
+            zkSessionTimeout,
+            new Watcher() {
+                @Override
+                synchronized public void process(WatchedEvent event) {
+                }
+            });
     }
 
     /**
      * Open a ZooKeeper connection for the JobState.
      */
     public static ZooKeeper zkOpen(Configuration conf)
-        throws IOException
-    {
+        throws IOException {
         return zkOpen(conf.get(ZK_HOSTS),
-                      conf.getInt(ZK_SESSION_TIMEOUT, 30000));
+            conf.getInt(ZK_SESSION_TIMEOUT, 30000));
     }
 
     public ZooKeeperStorage() {
@@ -95,8 +93,7 @@
      * Close this ZK connection.
      */
     public void close()
-        throws IOException
-    {
+        throws IOException {
         if (zk != null) {
             try {
                 zk.close();
@@ -119,15 +116,14 @@
      * Create a node in ZooKeeper
      */
     public void create(Type type, String id)
-        throws IOException
-    {
+        throws IOException {
         try {
             String[] paths = getPaths(makeZnode(type, id));
             boolean wasCreated = false;
             for (String znode : paths) {
                 try {
                     zk.create(znode, new byte[0],
-                              Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+                        Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
                     wasCreated = true;
                 } catch (KeeperException.NodeExistsException e) {
                 }
@@ -138,7 +134,7 @@
                     // to see how the storage mechanism evolves.
                     if (type.equals(Type.JOB)) {
                         JobStateTracker jt = new JobStateTracker(id, zk, false,
-                                job_trackingpath);
+                            job_trackingpath);
                         jt.create();
                     }
                 } catch (Exception e) {
@@ -153,7 +149,7 @@
             if (wasCreated) {
                 try {
                     saveField(type, id, "created",
-                              Long.toString(System.currentTimeMillis()));
+                        Long.toString(System.currentTimeMillis()));
                 } catch (NotFoundException nfe) {
                     // Wow, something's really wrong.
                     throw new IOException("Couldn't write to node " + id, nfe);
@@ -210,17 +206,16 @@
      * @throws InterruptedException
      */
     private void setFieldData(Type type, String id, String name, String val)
-        throws KeeperException, UnsupportedEncodingException, InterruptedException
-    {
+        throws KeeperException, UnsupportedEncodingException, InterruptedException {
         try {
             zk.create(makeFieldZnode(type, id, name),
-                      val.getBytes(ENCODING),
-                      Ids.OPEN_ACL_UNSAFE,
-                      CreateMode.PERSISTENT);
-        } catch(KeeperException.NodeExistsException e) {
+                val.getBytes(ENCODING),
+                Ids.OPEN_ACL_UNSAFE,
+                CreateMode.PERSISTENT);
+        } catch (KeeperException.NodeExistsException e) {
             zk.setData(makeFieldZnode(type, id, name),
-                       val.getBytes(ENCODING),
-                       -1);
+                val.getBytes(ENCODING),
+                -1);
         }
     }
 
@@ -246,9 +241,9 @@
                 create(type, id);
                 setFieldData(type, id, key, val);
             }
-        } catch(Exception e) {
+        } catch (Exception e) {
             throw new NotFoundException("Writing " + key + ": " + val + ", "
-                                        + e.getMessage());
+                + e.getMessage());
         }
     }
 
@@ -257,7 +252,7 @@
         try {
             byte[] b = zk.getData(makeFieldZnode(type, id, key), false, null);
             return new String(b, ENCODING);
-        } catch(Exception e) {
+        } catch (Exception e) {
             return null;
         }
     }
@@ -266,12 +261,12 @@
     public Map<String, String> getFields(Type type, String id) {
         HashMap<String, String> map = new HashMap<String, String>();
         try {
-            for (String node: zk.getChildren(makeZnode(type, id), false)) {
+            for (String node : zk.getChildren(makeZnode(type, id), false)) {
                 byte[] b = zk.getData(makeFieldZnode(type, id, node),
-                                      false, null);
+                    false, null);
                 map.put(node, new String(b, ENCODING));
             }
-        } catch(Exception e) {
+        } catch (Exception e) {
             return map;
         }
         return map;
@@ -287,7 +282,7 @@
                     // Other nodes may be trying to delete this at the same time,
                     // so just log errors and skip them.
                     throw new NotFoundException("Couldn't delete " +
-                                                makeFieldZnode(type, id, child));
+                        makeFieldZnode(type, id, child));
                 }
             }
             try {
@@ -295,12 +290,12 @@
             } catch (Exception e) {
                 // Same thing -- might be deleted by other nodes, so just go on.
                 throw new NotFoundException("Couldn't delete " +
-                                            makeZnode(type, id));
+                    makeZnode(type, id));
             }
         } catch (Exception e) {
             // Error getting children of node -- probably node has been deleted
             throw new NotFoundException("Couldn't get children of " +
-                                        makeZnode(type, id));
+                makeZnode(type, id));
         }
         return true;
     }
@@ -308,7 +303,7 @@
     @Override
     public List<String> getAll() {
         ArrayList<String> allNodes = new ArrayList<String>();
-        for (Type type: Type.values()) {
+        for (Type type : Type.values()) {
             allNodes.addAll(getAllForType(type));
         }
         return allNodes;
@@ -327,7 +322,7 @@
     public List<String> getAllForKey(String key, String value) {
         ArrayList<String> allNodes = new ArrayList<String>();
         try {
-            for (Type type: Type.values()) {
+            for (Type type : Type.values()) {
                 allNodes.addAll(getAllForTypeAndKey(type, key, value));
             }
         } catch (Exception e) {
diff --git a/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/mock/MockServer.java b/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/mock/MockServer.java
index c5a8633..7f0d908 100644
--- a/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/mock/MockServer.java
+++ b/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/mock/MockServer.java
@@ -24,6 +24,7 @@
  */
 public class MockServer extends Server {
     public String user;
+
     public MockServer() {
         execService = new MockExecService();
         resetUser();
diff --git a/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/tool/TestTempletonUtils.java b/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/tool/TestTempletonUtils.java
index 984a445..36b81c2 100644
--- a/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/tool/TestTempletonUtils.java
+++ b/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/tool/TestTempletonUtils.java
@@ -183,7 +183,7 @@
             Assert.assertTrue(TempletonUtils.hadoopFsListAsString("/tmp,/usr",
                                                            null, null) == null);
             Assert.assertEquals("file:/tmp,file:/usr", TempletonUtils.hadoopFsListAsString
-                         ("/tmp,/usr", new Configuration(), null));
+                ("/tmp,/usr", new Configuration(), null));
         } catch (FileNotFoundException e) {
             Assert.fail("Couldn't find name for /tmp");
         } catch (Exception e) {
diff --git a/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/tool/TestTrivialExecService.java b/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/tool/TestTrivialExecService.java
index 84cb784..72fb331 100644
--- a/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/tool/TestTrivialExecService.java
+++ b/webhcat/svr/src/test/java/org/apache/hcatalog/templeton/tool/TestTrivialExecService.java
@@ -36,17 +36,17 @@
         try {
             Process process = TrivialExecService.getInstance()
                 .run(list,
-                     new ArrayList<String>(),
-                     new HashMap<String, String>());
+                    new ArrayList<String>(),
+                    new HashMap<String, String>());
             out = new BufferedReader(new InputStreamReader(
-                                         process.getInputStream()));
+                process.getInputStream()));
             err = new BufferedReader(new InputStreamReader(
-                                         process.getErrorStream()));
+                process.getErrorStream()));
             Assert.assertEquals("success", out.readLine());
             out.close();
             String line;
             while ((line = err.readLine()) != null) {
-              Assert.fail(line);
+                Assert.fail(line);
             }
             process.waitFor();
         } catch (Exception e) {