[maven-release-plugin]  copy for tag hbase-0.95.2-hadoop2-testing

git-svn-id: https://svn.apache.org/repos/asf/hbase/tags/hbase-0.95.2-hadoop2-testing@1509466 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/dev-support/generate-hadoopX-poms.sh b/dev-support/generate-hadoopX-poms.sh
new file mode 100644
index 0000000..5e2c629
--- /dev/null
+++ b/dev-support/generate-hadoopX-poms.sh
@@ -0,0 +1,133 @@
+#!/bin/bash
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+# Generates a pom.xml with a hadoop1 or a hadoop2 suffix which can
+# then be used to build hbase artifacts suitable for hadoop1 or hadoop2,
+# fit for publishing to a maven repository. These poms can also be used
+# when making tarballs for a release.  The original pom.xml is untouched.
+#
+# This script exists because we cannot figure how to publish
+# into the local (or remote) repositories artifacts that can be
+# used by downstream projects (with poms describing necessary
+# includes).  See HBASE-8224 and HBASE-8488 for background.
+#
+# Generation is done by replacing values in the original pom and
+# enabling the appropriate profile using the '!' trick in the
+# hadoop.profile property (this is fragile!) so there is no need to
+# specify a profile on the command line.  The original pom.xml should
+# be what we maintain, adding in new dependencies, etc., as needed.
+# This script should make it through almost any change to the
+# original.
+#
+# Here is how you would build an hbase against hadoop2 and publish
+# the artifacts to your local repo:
+#
+# First run this script passing in the current project version and the
+# version you would like the generated artifacts to have.  Include
+# either -hadoop1 if building against hadoop1 or -hadoop2 if building
+# against hadoop2.  These substrings are expected as part of the new version.
+#
+#  $ bash -x ./dev-support/generate-hadoopX-poms.sh 0.95.2-SNAPSHOT 0.95.2-hadoop2-SNAPSHOT
+#
+# This will generate new poms beside the current pom.xml, made from the
+# original pom.xml but with a hadoop1 or hadoop2 suffix depending on
+# what you passed for a new version.  Now build, passing the generated
+# pom name as the pom mvn should use.  For example, say we were building
+# hbase against hadoop2:
+#
+#  $ mvn clean install -DskipTests -f pom.xml.hadoop2
+#
+
+# TODO: Generate new poms into target dirs so they don't pollute the src tree.
+# It is a little awkward to do since parent pom needs to be able to find
+# the child modules and the child modules need to be able to get to the
+# parent.
+
+function usage {
+  echo "Usage: $0 CURRENT_VERSION NEW_VERSION"
+  echo "For example, $0 hbase-0.95.2-SNAPSHOT hbase-0.95.2-hadoop1-SNAPSHOT"
+  echo "Presumes VERSION has hadoop1 or hadoop2 in it."
+  exit 1
+}
+
+if [[ "$#" -ne 2 ]]; then usage; fi
+old_hbase_version="$1"
+new_hbase_version="$2"
+# Get hadoop version from the new hbase version
+hadoop_version=`echo "$new_hbase_version" | sed -n 's/.*-\(hadoop[12]\)-.*/\1/p'`
+if [[ -z $hadoop_version ]]; then usage ; fi
+
+# Get dir to operate in
+hbase_home="${HBASE_HOME}"
+if [[ -z "$hbase_home" ]]; then
+  here="`dirname \"$0\"`"              # relative
+  here="`( cd \"$here\" && pwd )`"  # absolutized and normalized
+  if [ -z "$here" ] ; then
+    # error; for some reason, the path is not accessible
+    # to the script (e.g. permissions re-evaled after suid)
+    exit 1  # fail
+  fi
+  hbase_home="`dirname \"$here\"`"
+fi
+
+# Now figure which profile to activate.
+h1=
+h2=
+default='<name>!hadoop.profile<\/name>'
+notdefault='<name>hadoop.profile<\/name>'
+case "${hadoop_version}" in
+  hadoop1)
+    h1="${default}"
+    h2="${notdefault}<value>2.0<\/value>"
+    ;;
+  hadoop2)
+    h1="${notdefault}<value>1.1<\/value>"
+    h2="${default}"
+    ;;
+  *) echo "Unknown ${hadoop_version}"
+    usage
+    ;;
+esac
+
+pom=pom.xml
+nupom="$pom.$hadoop_version"
+poms=`find $hbase_home -name ${pom}`
+for p in $poms; do
+  nuname="`dirname $p`/${nupom}"
+  # Now we do search and replace of explicit strings.  The best
+  # way of seeing what the below does is by doing a diff between
+  # the original pom and the generated pom (pom.xml.hadoop1 or
+  # pom.xml.hadoop2). We replace the compat.module variable with
+  # either hbase-hadoop1-compat or hbase-hadoop2-compat, we
+  # replace the version string in all poms, we change modules
+  # to include reference to the non-standard pom name, we
+  # adjust relative paths so child modules can find the parent pom,
+  # and we enable/disable hadoop 1 and hadoop 2 profiles as
+  # appropriate removing a comment string too.  We output the
+  # new pom beside the original.
+  sed -e "s/\${compat.module}/hbase-${hadoop_version}-compat/" \
+    -e "s/${old_hbase_version}/${new_hbase_version}/" \
+    -e "s/\(<module>[^<]*\)/\1\/${nupom}/" \
+    -e "s/\(relativePath\>\.\.\)/\1\/${nupom}/" \
+    -e "s/<!--h1-->.*name>.*/${h1}/" \
+    -e "s/<!--h2-->.*<name>.*/${h2}/" \
+    -e '/--Below formatting for .*poms\.sh--/d' \
+    -e 's/\(<pomFileName>\)[^<]*/\1${nupom}/' \
+  $p > "$nuname"
+done
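
For reference, the workflow the script header describes is two commands end to end. A minimal sketch, assuming it is run from the hbase source root and that 0.95.2-SNAPSHOT matches the version currently in pom.xml:

  # Generate pom.xml.hadoop2 beside every pom.xml in the source tree
  $ bash ./dev-support/generate-hadoopX-poms.sh 0.95.2-SNAPSHOT 0.95.2-hadoop2-SNAPSHOT

  # Build the hadoop2 artifacts and install them into the local repository
  $ mvn clean install -DskipTests -f pom.xml.hadoop2
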
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 7d5e413..3ddf2ae 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -154,10 +154,11 @@
  activation property as the parent Hadoop 1.0.x profile to make sure it gets run at
  the same time. -->
     <profile>
-      <id>hadoop-1.0</id>
+      <id>hadoop-1.1</id>
       <activation>
         <property>
-          <name>!hadoop.profile</name>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h1--><name>!hadoop.profile</name>
         </property>
       </activation>
       <dependencies>
@@ -176,14 +177,22 @@
       <id>hadoop-2.0</id>
       <activation>
         <property>
-          <name>hadoop.profile</name>
-          <value>2.0</value>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h2--><name>hadoop.profile</name><value>2.0</value>
         </property>
       </activation>
       <dependencies>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-client</artifactId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-auth</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 05093d9..e000265 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -187,14 +187,27 @@
       <groupId>commons-logging</groupId>
       <artifactId>commons-logging</artifactId>
     </dependency>
-     <dependency>
-       <groupId>org.slf4j</groupId>
-       <artifactId>slf4j-api</artifactId>
-     </dependency>
-     <dependency>
-       <groupId>org.slf4j</groupId>
-       <artifactId>slf4j-log4j12</artifactId>
-     </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+      <version>3.2.1</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <scope>compile</scope>
+    </dependency>
   </dependencies>
 
   <profiles>
@@ -218,7 +231,8 @@
       <id>hadoop-1.1</id>
       <activation>
         <property>
-          <name>!hadoop.profile</name>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h1--><name>!hadoop.profile</name>
         </property>
       </activation>
       <dependencies>
@@ -260,18 +274,18 @@
       <id>hadoop-2.0</id>
       <activation>
         <property>
-          <name>hadoop.profile</name>
-          <value>2.0</value>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h2--><name>hadoop.profile</name><value>2.0</value>
         </property>
       </activation>
       <dependencies>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-client</artifactId>
+          <artifactId>hadoop-annotations</artifactId>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-annotations</artifactId>
+          <artifactId>hadoop-common</artifactId>
         </dependency>
       </dependencies>
       <build>
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JVM.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JVM.java
index d91c8b2..a60d96c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JVM.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JVM.java
@@ -27,10 +27,10 @@
 import java.io.InputStream;
 import java.io.InputStreamReader;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import java.lang.reflect.Method;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -44,10 +44,8 @@
 
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class JVM 
-{
-  static final Logger LOG = LoggerFactory.getLogger(JVM.class);
-
+public class JVM {
+  private static final Log LOG = LogFactory.getLog(JVM.class);
   private OperatingSystemMXBean osMbean;
 
   private static final boolean ibmvendor =
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index e88f77c..bb4ba0d 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -66,10 +66,32 @@
     <dependency>
         <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-common</artifactId>
+    </dependency>
+    <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-common</artifactId>
         <type>test-jar</type>
         <scope>test</scope>
     </dependency>
     <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-protocol</artifactId>
+        <scope>compile</scope>
+    </dependency>
+    <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-client</artifactId>
+        <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
     </dependency>
@@ -84,12 +106,8 @@
       <artifactId>libthrift</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
     </dependency>
  </dependencies>
  <profiles>
@@ -117,7 +135,8 @@
          <id>hadoop-1.1</id>
          <activation>
              <property>
-                 <name>!hadoop.profile</name>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h1--><name>!hadoop.profile</name>
              </property>
          </activation>
          <dependencies>
@@ -139,22 +158,18 @@
          <id>hadoop-2.0</id>
          <activation>
              <property>
-                 <name>hadoop.profile</name>
-                 <value>2.0</value>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h2--><name>hadoop.profile</name><value>2.0</value>
              </property>
          </activation>
          <dependencies>
              <dependency>
                  <groupId>org.apache.hadoop</groupId>
-                 <artifactId>hadoop-client</artifactId>
+                 <artifactId>hadoop-mapreduce-client-core</artifactId>
              </dependency>
              <dependency>
                  <groupId>org.apache.hadoop</groupId>
-                 <artifactId>hadoop-annotations</artifactId>
-             </dependency>
-             <dependency>
-                 <groupId>org.apache.hadoop</groupId>
-                 <artifactId>hadoop-minicluster</artifactId>
+                 <artifactId>hadoop-common</artifactId>
              </dependency>
          </dependencies>
          <build>
diff --git a/hbase-hadoop1-compat/pom.xml b/hbase-hadoop1-compat/pom.xml
index d24a5d1..f5f2c34 100644
--- a/hbase-hadoop1-compat/pom.xml
+++ b/hbase-hadoop1-compat/pom.xml
@@ -118,16 +118,16 @@
       <artifactId>metrics-core</artifactId>
     </dependency>
     <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-test</artifactId>
       <version>${hadoop-one.version}</version><!--$NO-MVN-MAN-VER$-->
       <optional>true</optional>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
   </dependencies>
 
   <profiles>
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index c1d1fa2..a30f459 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -140,7 +140,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-client</artifactId>
+      <artifactId>hadoop-mapreduce-client-core</artifactId>
       <version>${hadoop-two.version}</version>
     </dependency>
     <dependency>
@@ -157,10 +157,6 @@
       <groupId>com.yammer.metrics</groupId>
       <artifactId>metrics-core</artifactId>
     </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-    </dependency>
     <!-- This was marked as test dep in earlier pom, but was scoped compile. Where
       do we actually need it? -->
     <dependency>
@@ -168,6 +164,18 @@
       <artifactId>hadoop-minicluster</artifactId>
       <version>${hadoop-two.version}</version>
     </dependency>
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
   </dependencies>
 
   <profiles>
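
The pattern in this module's changes (replacing the umbrella hadoop-client artifact with the specific hadoop-mapreduce-client-core jar, and declaring formerly transitive dependencies such as commons-lang, commons-logging and guava explicitly) can be checked mechanically. An illustrative check, run after a build since the plugin inspects compiled classes:

  # Report classes the module uses from dependencies its pom never declares
  $ mvn dependency:analyze -pl hbase-hadoop2-compat
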
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index eb3d068..8a535d5 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -140,10 +140,43 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
+      <type>jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
       <type>test-jar</type>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-math</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
     </dependency>
     <dependency>
@@ -175,6 +208,10 @@
           <type>test-jar</type>
           <scope>test</scope>
       </dependency>
+      <dependency>
+        <groupId>org.cloudera.htrace</groupId>
+        <artifactId>htrace</artifactId>
+      </dependency>
     <!-- General dependencies -->
 
   </dependencies>
@@ -200,7 +237,8 @@
       <id>hadoop-1.1</id>
       <activation>
         <property>
-          <name>!hadoop.profile</name>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h1--><name>!hadoop.profile</name>
         </property>
       </activation>
       <dependencies>
@@ -242,14 +280,19 @@
       <id>hadoop-2.0</id>
       <activation>
         <property>
-          <name>hadoop.profile</name>
-          <value>2.0</value>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h2--><name>hadoop.profile</name><value>2.0</value>
         </property>
       </activation>
       <dependencies>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-client</artifactId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+          <type>test-jar</type>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
@@ -257,7 +300,7 @@
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-minicluster</artifactId>
+          <artifactId>hadoop-common</artifactId>
         </dependency>
       </dependencies>
       <build>
diff --git a/hbase-prefix-tree/pom.xml b/hbase-prefix-tree/pom.xml
index b34df17..a229f4d 100644
--- a/hbase-prefix-tree/pom.xml
+++ b/hbase-prefix-tree/pom.xml
@@ -78,6 +78,105 @@
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>${compat.module}</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
   </dependencies>
 
+  <profiles>
+    <!-- Profiles for building against different hadoop versions -->
+    <profile>
+      <id>hadoop-1.1</id>
+      <activation>
+        <property>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h1--><name>!hadoop.profile</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+    <profile>
+      <id>hadoop-1.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>1.0</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+    <!--
+      profile for building against Hadoop 2.0.0-alpha. Activate using:
+       mvn -Dhadoop.profile=2.0
+    -->
+    <profile>
+      <id>hadoop-2.0</id>
+      <activation>
+        <property>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h2--><name>hadoop.profile</name><value>2.0</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+    <!--
+      profile for building against Hadoop 3.0.x. Activate using:
+       mvn -Dhadoop.profile=3.0
+    -->
+    <profile>
+      <id>hadoop-3.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>3.0</value>
+        </property>
+      </activation>
+      <properties>
+        <hadoop.version>3.0-SNAPSHOT</hadoop.version>
+      </properties>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+  </profiles>
 </project>
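
Throughout these poms, the hadoop-1.1 profile activates on <name>!hadoop.profile</name>, Maven property negation that makes the profile active when the property is not set, while hadoop-2.0 activates on hadoop.profile=2.0. The <!--h1-->/<!--h2--> markers exist only so the sed expressions in generate-hadoopX-poms.sh can rewrite that activation in one pass. Selecting a profile from the unmodified pom.xml is then just a -D flag, for example:

  # Default build: hadoop.profile is unset, so the hadoop-1.1 profile activates
  $ mvn clean install -DskipTests

  # Hadoop 2 build: activates the hadoop-2.0 profile
  $ mvn clean install -DskipTests -Dhadoop.profile=2.0
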
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 04fd90c..3d33968 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -300,6 +300,14 @@
       <type>test-jar</type>
     </dependency>
     <dependency>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
     </dependency>
@@ -372,6 +380,10 @@
       <artifactId>zookeeper</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.apache.thrift</groupId>
       <artifactId>libthrift</artifactId>
     </dependency>
@@ -420,14 +432,6 @@
       <artifactId>jackson-xc</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-    </dependency>
-    <dependency>
       <groupId>tomcat</groupId>
       <artifactId>jasper-compiler</artifactId>
     </dependency>
@@ -548,7 +552,8 @@
       <id>hadoop-1.1</id>
       <activation>
         <property>
-          <name>!hadoop.profile</name>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h1--><name>!hadoop.profile</name>
         </property>
       </activation>
       <dependencies>
@@ -589,13 +594,39 @@
       <id>hadoop-2.0</id>
       <activation>
         <property>
-          <name>hadoop.profile</name>
-          <value>2.0</value>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h2--><name>hadoop.profile</name><value>2.0</value>
         </property>
       </activation>
       <dependencies>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-auth</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+          <type>test-jar</type>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs</artifactId>
+          <type>test-jar</type>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-client</artifactId>
         </dependency>
         <dependency>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java
index 860fff9..9bffc5c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java
@@ -18,7 +18,7 @@
 
 /**
  * Restrict the domain of a data attribute, often times to fulfill business rules/requirements.
- * 
+ *
  <p>
  <h2> Table of Contents</h2>
  <ul>
@@ -30,94 +30,94 @@
  </p>
 
  <h2><a name="overview">Overview</a></h2>
- Constraints are used to enforce business rules in a database. 
- By checking all {@link org.apache.hadoop.hbase.client.Put Puts} on a given table, you can enforce very specific data policies. 
- For instance, you can ensure that a certain column family-column qualifier pair always has a value between 1 and 10. 
+ Constraints are used to enforce business rules in a database.
+ By checking all {@link org.apache.hadoop.hbase.client.Put Puts} on a given table, you can enforce very specific data policies.
+ For instance, you can ensure that a certain column family-column qualifier pair always has a value between 1 and 10.
  Otherwise, the {@link org.apache.hadoop.hbase.client.Put} is rejected and the data integrity is maintained.
  <p>
- Constraints are designed to be configurable, so a constraint can be used across different tables, but implement different 
+ Constraints are designed to be configurable, so a constraint can be used across different tables, but implement different
  behavior depending on the specific configuration given to that constraint.
  <p>
- By adding a constraint to a table (see <a href="#usage">Example Usage</a>), constraints will automatically enabled. 
- You also then have the option of to disable (just 'turn off') or remove (delete all associated information) all constraints on a table. 
- If you remove all constraints 
- (see {@link org.apache.hadoop.hbase.constraint.Constraints#remove(org.apache.hadoop.hbase.HTableDescriptor)}, 
- you must re-add any {@link org.apache.hadoop.hbase.constraint.Constraint} you want on that table. 
- However, if they are just disabled (see {@link org.apache.hadoop.hbase.constraint.Constraints#disable(org.apache.hadoop.hbase.HTableDescriptor)}, 
+ By adding a constraint to a table (see <a href="#usage">Example Usage</a>), constraints will automatically be enabled.
+ You then also have the option to disable (just 'turn off') or remove (delete all associated information) all constraints on a table.
+ If you remove all constraints
+ (see {@link org.apache.hadoop.hbase.constraint.Constraints#remove(org.apache.hadoop.hbase.HTableDescriptor)},
+ you must re-add any {@link org.apache.hadoop.hbase.constraint.Constraint} you want on that table.
+ However, if they are just disabled (see {@link org.apache.hadoop.hbase.constraint.Constraints#disable(org.apache.hadoop.hbase.HTableDescriptor)},
  all you need to do is enable constraints again, and everything will be turned back on as it was configured.
  Individual constraints can also be individually enabled, disabled or removed without affecting other constraints.
  <p>
- By default, constraints are disabled on a table. 
+ By default, constraints are disabled on a table.
  This means you will not see <i>any</i> slow down on a table if constraints are not enabled.
  <p>
 
  <h2><a name="concurrency">Concurrency and Atomicity</a></h2>
- Currently, no attempts at enforcing correctness in a multi-threaded scenario when modifying a constraint, via 
- {@link org.apache.hadoop.hbase.constraint.Constraints}, to the the {@link org.apache.hadoop.hbase.HTableDescriptor}. 
- This is particularly important when adding a constraint(s) to the {@link org.apache.hadoop.hbase.HTableDescriptor} 
+ Currently, no attempts are made at enforcing correctness in a multi-threaded scenario when modifying a constraint, via
+ {@link org.apache.hadoop.hbase.constraint.Constraints}, to the {@link org.apache.hadoop.hbase.HTableDescriptor}.
+ This is particularly important when adding a constraint(s) to the {@link org.apache.hadoop.hbase.HTableDescriptor}
  as it first retrieves the next priority from a custom value set in the descriptor,
- adds each constraint (with increasing priority) to the descriptor, and then the next available priority is re-stored 
- back in the {@link org.apache.hadoop.hbase.HTableDescriptor}. 
+ adds each constraint (with increasing priority) to the descriptor, and then the next available priority is re-stored
+ back in the {@link org.apache.hadoop.hbase.HTableDescriptor}.
  <p>
- Locking is recommended around each of Constraints add methods: 
- {@link org.apache.hadoop.hbase.constraint.Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, Class...)}, 
- {@link org.apache.hadoop.hbase.constraint.Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, org.apache.hadoop.hbase.util.Pair...)}, 
+ Locking is recommended around each of the Constraints add methods:
+ {@link org.apache.hadoop.hbase.constraint.Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, Class...)},
+ {@link org.apache.hadoop.hbase.constraint.Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, org.apache.hadoop.hbase.util.Pair...)},
  and {@link org.apache.hadoop.hbase.constraint.Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, Class, org.apache.hadoop.conf.Configuration)}.
  Any changes on <i>a single HTableDescriptor</i> should be serialized, either within a single thread or via external mechanisms.
  <p>
- Note that having a higher priority means that a constraint will run later; e.g. a constraint with priority 1 will run before a 
- constraint with priority 2. 
+ Note that having a higher priority means that a constraint will run later; e.g. a constraint with priority 1 will run before a
+ constraint with priority 2.
  <p>
- Since Constraints currently are designed to just implement simple checks (e.g. is the value in the right range), there will 
- be no atomicity conflicts. 
- Even if one of the puts finishes the constraint first, the single row will not be corrupted and the 'fastest' write will win; 
+ Since Constraints are currently designed to just implement simple checks (e.g. is the value in the right range), there will
+ be no atomicity conflicts.
+ Even if one of the puts finishes the constraint first, the single row will not be corrupted and the 'fastest' write will win;
  the underlying region takes care of breaking the tie and ensuring that writes get serialized to the table.
- So yes, this doesn't ensure that we are going to get specific ordering or even a fully consistent view of the underlying data. 
+ So yes, this doesn't ensure that we are going to get specific ordering or even a fully consistent view of the underlying data.
  <p>
  Each constraint should only use local/instance variables, unless doing more advanced usage. Static variables could cause difficulties
  when checking concurrent writes to the same region, leading to either highly locked situations (decreasing through-put) or higher probability of errors.
  However, as long as each constraint just uses local variables, each thread interacting with the constraint will execute correctly and efficiently.
 
  <h2><a name="caveats">Caveats</a></h2>
- In traditional (SQL) databases, Constraints are often used to enforce <a href="http://en.wikipedia.org/wiki/Relational_database#Constraints">referential integrity</a>. 
- However, in HBase, this will likely cause significant overhead and dramatically decrease the number of 
- {@link org.apache.hadoop.hbase.client.Put Puts}/second possible on a table. This is because to check the referential integrity 
+ In traditional (SQL) databases, Constraints are often used to enforce <a href="http://en.wikipedia.org/wiki/Relational_database#Constraints">referential integrity</a>.
+ However, in HBase, this will likely cause significant overhead and dramatically decrease the number of
+ {@link org.apache.hadoop.hbase.client.Put Puts}/second possible on a table. This is because to check the referential integrity
  when making a {@link org.apache.hadoop.hbase.client.Put}, one must block on a scan for the 'remote' table, checking for the valid reference.
- For millions of {@link org.apache.hadoop.hbase.client.Put Puts} a second, this will breakdown very quickly. 
+ For millions of {@link org.apache.hadoop.hbase.client.Put Puts} a second, this will break down very quickly.
  There are several options around the blocking behavior including, but not limited to:
  <ul>
- <li>Create a 'pre-join' table where the keys are already denormalized</li>  
+ <li>Create a 'pre-join' table where the keys are already denormalized</li>
  <li>Designing for 'incorrect' references</li>
  <li>Using an external enforcement mechanism</li>
  </ul>
  There are also several general considerations that must be taken into account, when using Constraints:
  <ol>
- <li>All changes made via {@link org.apache.hadoop.hbase.constraint.Constraints} will make modifications to the 
- {@link org.apache.hadoop.hbase.HTableDescriptor} for a given table. As such, the usual renabling of tables should be used for 
+ <li>All changes made via {@link org.apache.hadoop.hbase.constraint.Constraints} will make modifications to the
+ {@link org.apache.hadoop.hbase.HTableDescriptor} for a given table. As such, the usual re-enabling of tables should be used for
  propagating changes to the table. When at all possible, Constraints should be added to the table before the table is created.</li>
- <li>Constraints are run in the order that they are added to a table. This has implications for what order constraints should 
+ <li>Constraints are run in the order that they are added to a table. This has implications for what order constraints should
  be added to a table.</li>
- <li>Whenever new Constraint jars are added to a region server, those region servers need to go through a rolling restart to 
+ <li>Whenever new Constraint jars are added to a region server, those region servers need to go through a rolling restart to
  make sure that they pick up the new jars and can enable the new constraints.</li>
  <li>There are certain keys that are reserved for the Configuration namespace:
  <ul>
  <li>_ENABLED - used server-side to determine if a constraint should be run</li>
  <li>_PRIORITY - used server-side to determine what order a constraint should be run</li>
  </ul>
- If these items are set, they will be respected in the constraint configuration, but they are taken care of by default in when 
+ If these items are set, they will be respected in the constraint configuration, but they are taken care of by default when
  adding constraints to an {@link org.apache.hadoop.hbase.HTableDescriptor} via the usual method.</li>
  </ol>
- <p> 
- Under the hood, constraints are implemented as a Coprocessor (see {@link org.apache.hadoop.hbase.constraint.ConstraintProcessor} 
+ <p>
+ Under the hood, constraints are implemented as a Coprocessor (see {@link org.apache.hadoop.hbase.constraint.ConstraintProcessor}
  if you are interested).
 
 
  <h2><a name="usage">Example usage</a></h2>
- First, you must define a {@link org.apache.hadoop.hbase.constraint.Constraint}. 
+ First, you must define a {@link org.apache.hadoop.hbase.constraint.Constraint}.
  The best way to do this is to extend {@link org.apache.hadoop.hbase.constraint.BaseConstraint}, which takes care of some of the more
  mundane details of using a {@link org.apache.hadoop.hbase.constraint.Constraint}.
  <p>
- Let's look at one possible implementation of a constraint - an IntegerConstraint(there are also several simple examples in the tests). 
+ Let's look at one possible implementation of a constraint - an IntegerConstraint (there are also several simple examples in the tests).
  The IntegerConstraint checks to make sure that the value is a String-encoded <code>int</code>.
  It is really simple to implement this kind of constraint, the only method needs to be implemented is
  {@link org.apache.hadoop.hbase.constraint.Constraint#check(org.apache.hadoop.hbase.client.Put)}:
@@ -141,18 +141,18 @@
  } catch (NumberFormatException e) {
  throw new ConstraintException("Value in Put (" + p
  + ") was not a String-encoded integer", e);
- } } } 
+ } } }
  </pre></blockquote>
  </div>
  <p>
- Note that all exceptions that you expect to be thrown must be caught and then rethrown as a 
- {@link org.apache.hadoop.hbase.exceptions.ConstraintException}. This way, you can be sure that a
- {@link org.apache.hadoop.hbase.client.Put} fails for an expected reason, rather than for any reason. 
- For example, an {@link java.lang.OutOfMemoryError} is probably indicative of an inherent problem in 
+ Note that all exceptions that you expect to be thrown must be caught and then rethrown as a
+ {@link org.apache.hadoop.hbase.constraint.ConstraintException}. This way, you can be sure that a
+ {@link org.apache.hadoop.hbase.client.Put} fails for an expected reason, rather than for any reason.
+ For example, an {@link java.lang.OutOfMemoryError} is probably indicative of an inherent problem in
  the {@link org.apache.hadoop.hbase.constraint.Constraint}, rather than a failed {@link org.apache.hadoop.hbase.client.Put}.
  <p>
  If an unexpected exception is thrown (for example, any kind of uncaught {@link java.lang.RuntimeException}),
- constraint-checking will be 'unloaded' from the regionserver where that error occurred. 
+ constraint-checking will be 'unloaded' from the regionserver where that error occurred.
  This means no further {@link org.apache.hadoop.hbase.constraint.Constraint Constraints} will be checked on that server
  until it is reloaded. This is done to ensure the system remains as available as possible.
  Therefore, be careful when writing your own Constraint.
@@ -166,14 +166,14 @@
  Constraints.add(desc, IntegerConstraint.class);
  </pre></blockquote></div>
  <p>
- Once we added the IntegerConstraint, constraints will be enabled on the table (once it is created) and 
+ Once we have added the IntegerConstraint, constraints will be enabled on the table (once it is created) and
  we will always check to make sure that the value is an String-encoded integer.
- <p> 
+ <p>
  However, suppose we also write our own constraint, <code>MyConstraint.java</code>.
- First, you need to make sure this class-files are in the classpath (in a jar) on the regionserver where 
+ First, you need to make sure this class file is in the classpath (in a jar) on the regionserver where
  that constraint will be run (this could require a rolling restart on the region server - see <a href="#caveats">Caveats</a> above)
  <p>
- Suppose that MyConstraint also uses a Configuration (see {@link org.apache.hadoop.hbase.constraint.Constraint#getConf()}). 
+ Suppose that MyConstraint also uses a Configuration (see {@link org.apache.hadoop.hbase.constraint.Constraint#getConf()}).
  Then adding MyConstraint looks like this:
 
  <div style="background-color: #cccccc; padding: 2px">
@@ -191,7 +191,7 @@
  <i>will be run first</i>, followed by MyConstraint.
  <p>
  Suppose we realize that the {@link org.apache.hadoop.conf.Configuration} for MyConstraint is actually wrong
- when it was added to the table. Note, when it is added to the table, it is <i>not</i> added by reference, 
+ when it was added to the table. Note, when it is added to the table, it is <i>not</i> added by reference,
  but is instead copied into the {@link org.apache.hadoop.hbase.HTableDescriptor}.
  Thus, to change the {@link org.apache.hadoop.conf.Configuration} we are using for MyConstraint, we need to do this:
 
@@ -202,7 +202,7 @@
  Constraints.setConfiguration(desc, MyConstraint.class, conf);
  </pre></blockquote></div>
  <p>
- This will overwrite the previous configuration for MyConstraint, but <i>not</i> change the order of the 
+ This will overwrite the previous configuration for MyConstraint, but <i>not</i> change the order of the
  constraint nor if it is enabled/disabled.
  <p>
  Note that the same constraint class can be added multiple times to a table without repercussion.
@@ -216,7 +216,7 @@
  </pre></blockquote></div>
  <p>
  This just turns off MyConstraint, but retains the position and the configuration associated with MyConstraint.
- Now, if we want to re-enable the constraint, its just another one-liner: 
+ Now, if we want to re-enable the constraint, it's just another one-liner:
  <div style="background-color: #cccccc">
  <blockquote><pre>
  Constraints.enable(desc, MyConstraint.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java
index f19ec5f..8c4ab61 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java
@@ -19,21 +19,19 @@
 
 package org.apache.hadoop.hbase.thrift;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.thrift.server.TThreadedSelectorServer;
 import org.apache.thrift.transport.TNonblockingServerTransport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A TThreadedSelectorServer.Args that reads hadoop configuration
  */
 @InterfaceAudience.Private
 public class HThreadedSelectorServerArgs extends TThreadedSelectorServer.Args {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TThreadedSelectorServer.class);
+  private static final Log LOG = LogFactory.getLog(TThreadedSelectorServer.class);
 
   /**
    * Number of selector threads for reading and writing socket
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
index 8741952..423c5c6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
@@ -65,8 +65,8 @@
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 /**
  * Standup the master and fake it to test various aspects of master function.
@@ -78,7 +78,7 @@
  */
 @Category(MediumTests.class)
 public class TestMasterNoCluster {
-  private static Logger LOG = LoggerFactory.getLogger(TestMasterNoCluster.class);
+  private static final Log LOG = LogFactory.getLog(TestMasterNoCluster.class);
   private static final HBaseTestingUtility TESTUTIL = new HBaseTestingUtility();
 
   @BeforeClass
@@ -240,7 +240,7 @@
    * @throws IOException
    * @throws KeeperException
    * @throws InterruptedException
-   * @throws DeserializationException 
+   * @throws DeserializationException
    * @throws ServiceException
    */
   @Test (timeout=30000)
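
The slf4j-to-commons-logging conversions above pair with the pom.xml changes below, which keep slf4j-api only because thrift, yammer metrics and zookeeper need it transitively. An illustrative way to confirm where each logging library now enters the build:

  # Show where slf4j, log4j and commons-logging appear in the dependency graph
  $ mvn dependency:tree | grep -iE 'slf4j|log4j|commons-logging'
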
diff --git a/pom.xml b/pom.xml
index 1a3e7d8..92385c0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -461,6 +461,7 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-release-plugin</artifactId>
+          <version>2.4.1</version>
           <!--Making a release I've been using mvn 3.0 and specifying the apache-release
               profile on the command line as follows:
 
@@ -480,6 +481,7 @@
                 But it builds the test jar.  From SUREFIRE-172.
               -->
             <arguments>-Dmaven.test.skip.exec</arguments>
+            <pomFileName>pom.xml</pomFileName>
           </configuration>
         </plugin>
         <plugin>
@@ -609,6 +611,9 @@
             <execution>
               <phase>prepare-package</phase>
               <goals>
+                  <!--This goal will install a -test.jar when we do install
+                      See http://maven.apache.org/guides/mini/guide-attached-tests.html
+                   -->
                 <goal>test-jar</goal>
               </goals>
             </execution>
@@ -884,7 +889,9 @@
     <commons-io.version>2.4</commons-io.version>
     <commons-lang.version>2.6</commons-lang.version>
     <commons-logging.version>1.1.1</commons-logging.version>
-    <commons-math.version>2.1</commons-math.version>
+    <commons-math.version>2.2</commons-math.version>
+    <collections.version>3.2.1</collections.version>
+    <httpclient.version>3.0.1</httpclient.version>
     <metrics-core.version>2.1.2</metrics-core.version>
     <guava.version>12.0.1</guava.version>
     <jackson.version>1.8.8</jackson.version>
@@ -896,13 +903,13 @@
     <jruby.version>1.6.8</jruby.version>
     <junit.version>4.11</junit.version>
     <htrace.version>1.50</htrace.version>
-    <slf4j.version>1.4.3</slf4j.version>
     <log4j.version>1.2.17</log4j.version>
     <mockito-all.version>1.9.0</mockito-all.version>
     <protobuf.version>2.4.1</protobuf.version>
     <stax-api.version>1.0.1</stax-api.version>
     <thrift.version>0.9.0</thrift.version>
     <zookeeper.version>3.4.5</zookeeper.version>
+    <slf4j.version>1.6.4</slf4j.version>
     <hadoop-snappy.version>0.0.1-SNAPSHOT</hadoop-snappy.version>
     <clover.version>2.6.3</clover.version>
     <jamon-runtime.version>2.3.1</jamon-runtime.version>
@@ -1046,6 +1053,18 @@
         <version>${jettison.version}</version>
       </dependency>
       <dependency>
+        <groupId>log4j</groupId>
+        <artifactId>log4j</artifactId>
+        <version>${log4j.version}</version>
+      </dependency>
+      <!--This is not used by hbase directly.  Used by thrift,
+          yammer and zk.-->
+      <dependency>
+        <groupId>org.slf4j</groupId>
+        <artifactId>slf4j-api</artifactId>
+        <version>${slf4j.version}</version>
+      </dependency>
+      <dependency>
         <groupId>com.yammer.metrics</groupId>
         <artifactId>metrics-core</artifactId>
         <version>${metrics-core.version}</version>
@@ -1056,6 +1075,16 @@
         <version>${guava.version}</version>
       </dependency>
       <dependency>
+        <groupId>commons-collections</groupId>
+        <artifactId>commons-collections</artifactId>
+        <version>${collections.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>commons-httpclient</groupId>
+        <artifactId>commons-httpclient</artifactId>
+        <version>${httpclient.version}</version>
+      </dependency>
+      <dependency>
         <groupId>commons-cli</groupId>
         <artifactId>commons-cli</artifactId>
         <version>${commons-cli.version}</version>
@@ -1091,11 +1120,6 @@
         <version>${commons-math.version}</version>
       </dependency>
       <dependency>
-        <groupId>log4j</groupId>
-        <artifactId>log4j</artifactId>
-        <version>${log4j.version}</version>
-      </dependency>
-      <dependency>
         <groupId>org.apache.zookeeper</groupId>
         <artifactId>zookeeper</artifactId>
         <version>${zookeeper.version}</version>
@@ -1204,16 +1228,6 @@
         <version>${jackson.version}</version>
       </dependency>
       <dependency>
-        <groupId>org.slf4j</groupId>
-        <artifactId>slf4j-api</artifactId>
-        <version>${slf4j.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.slf4j</groupId>
-        <artifactId>slf4j-log4j12</artifactId>
-        <version>${slf4j.version}</version>
-      </dependency>
-      <dependency>
         <!--If this is not in the runtime lib, we get odd
       "2009-02-27 11:38:39.504::WARN:  failed jsp
        java.lang.NoSuchFieldError: IS_SECURITY_ENABLED"
@@ -1297,6 +1311,12 @@
         <groupId>junit</groupId>
         <artifactId>junit</artifactId>
         <version>${junit.version}</version>
+        <exclusions>
+          <exclusion>
+            <groupId>org.hamcrest</groupId>
+            <artifactId>hamcrest-core</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <dependency>
         <groupId>org.mockito</groupId>
@@ -1319,6 +1339,10 @@
       <version>${findbugs-annotations}</version>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+    </dependency>
     <!-- Test dependencies -->
     <dependency>
       <groupId>junit</groupId>
@@ -1438,7 +1462,8 @@
       <id>hadoop-1.1</id>
       <activation>
         <property>
-          <name>!hadoop.profile</name>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h1--><name>!hadoop.profile</name>
         </property>
       </activation>
       <modules>
@@ -1446,7 +1471,6 @@
       </modules>
       <properties>
         <hadoop.version>${hadoop-one.version}</hadoop.version>
-        <slf4j.version>1.4.3</slf4j.version>
         <compat.module>hbase-hadoop1-compat</compat.module>
         <assembly.file>src/main/assembly/hadoop-one-compat.xml</assembly.file>
       </properties>
@@ -1507,7 +1531,6 @@
         <hadoop.version>1.0.4</hadoop.version>
         <!-- Need to set this for the Hadoop 1 compat module -->
         <hadoop-one.version>${hadoop.version}</hadoop-one.version>
-        <slf4j.version>1.4.3</slf4j.version>
         <compat.module>hbase-hadoop1-compat</compat.module>
         <assembly.file>src/main/assembly/hadoop-one-compat.xml</assembly.file>
       </properties>
@@ -1558,8 +1581,8 @@
       <id>hadoop-2.0</id>
       <activation>
         <property>
-          <name>hadoop.profile</name>
-          <value>2.0</value>
+            <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+            <!--h2--><name>hadoop.profile</name><value>2.0</value>
         </property>
       </activation>
       <modules>
@@ -1567,7 +1590,6 @@
       </modules>
       <properties>
         <hadoop.version>${hadoop-two.version}</hadoop.version>
-        <slf4j.version>1.6.1</slf4j.version>
         <compat.module>hbase-hadoop2-compat</compat.module>
         <assembly.file>src/main/assembly/hadoop-two-compat.xml</assembly.file>
       </properties>
@@ -1575,6 +1597,38 @@
         <dependencies>
           <dependency>
             <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-mapreduce-client-core</artifactId>
+            <version>${hadoop-two.version}</version>
+          </dependency>
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+            <version>${hadoop-two.version}</version>
+          </dependency>
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+            <version>${hadoop-two.version}</version>
+            <type>test-jar</type>
+          </dependency>
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-hdfs</artifactId>
+            <version>${hadoop-two.version}</version>
+          </dependency>
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-hdfs</artifactId>
+            <version>${hadoop-two.version}</version>
+            <type>test-jar</type>
+          </dependency>
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-auth</artifactId>
+            <version>${hadoop-two.version}</version>
+          </dependency>
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-common</artifactId>
             <version>${hadoop-two.version}</version>
           </dependency>
@@ -1625,7 +1679,6 @@
         </property>
       </activation>
       <properties>
-        <slf4j.version>1.6.1</slf4j.version>
         <hadoop.version>3.0.0-SNAPSHOT</hadoop.version>
       </properties>
       <dependencies>