Merge r1337003 through r1346681 from trunk.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3092@1346682 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/BUILDING.txt b/BUILDING.txt
index de3d45b..b0a2740 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -9,7 +9,7 @@
 * Forrest 0.8 (if generating docs)
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.4.1+ (for MapReduce and HDFS)
-* Autotools (if compiling native code)
+* CMake 2.6 or newer (if compiling native code)
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
 
 ----------------------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 2d6bcfe..fb75b90 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -601,7 +601,7 @@
       $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml \
       $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.html
     if [[ $newFindbugsWarnings > 0 ]] ; then
-      JIRA_COMMENT_FOOTER="Findbugs warnings: $BUILD_URL/artifact/trunk/$(basename $BASEDIR)/patchprocess/newPatchFindbugsWarnings${module_suffix}.html
+      JIRA_COMMENT_FOOTER="Findbugs warnings: $BUILD_URL/artifact/trunk/patchprocess/newPatchFindbugsWarnings${module_suffix}.html
 $JIRA_COMMENT_FOOTER"
     fi
   done
diff --git a/hadoop-assemblies/pom.xml b/hadoop-assemblies/pom.xml
index 560ea78..66b6bdb 100644
--- a/hadoop-assemblies/pom.xml
+++ b/hadoop-assemblies/pom.xml
@@ -15,7 +15,10 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-client/pom.xml b/hadoop-client/pom.xml
index d2682c2..2474652 100644
--- a/hadoop-client/pom.xml
+++ b/hadoop-client/pom.xml
@@ -109,10 +109,6 @@
         </exclusion>
         <exclusion>
           <groupId>org.apache.avro</groupId>
-          <artifactId>avro</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.avro</groupId>
           <artifactId>avro-ipc</artifactId>
         </exclusion>
         <exclusion>
diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml
index e9a8647..2994fb5 100644
--- a/hadoop-common-project/hadoop-annotations/pom.xml
+++ b/hadoop-common-project/hadoop-annotations/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml
index ceeb769..74364c7 100644
--- a/hadoop-common-project/hadoop-auth-examples/pom.xml
+++ b/hadoop-common-project/hadoop-auth-examples/pom.xml
@@ -12,7 +12,10 @@
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index b140199..4e4bb8b 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -12,7 +12,10 @@
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
index 9564026..6ff30f7 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
@@ -84,7 +84,7 @@
     try {
       defaultRealm = KerberosUtil.getDefaultRealm();
     } catch (Exception ke) {
-        LOG.warn("Kerberos krb5 configuration not found, setting default realm to empty");
+        LOG.debug("Kerberos krb5 configuration not found, setting default realm to empty");
         defaultRealm="";
     }
   }
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index d293fe4..259b5a4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -12,6 +12,9 @@
     HADOOP-8135. Add ByteBufferReadable interface to FSDataInputStream. (Henry
     Robinson via atm)
 
+    HDFS-3042. Automatic failover support for NameNode HA (todd)
+    (see dedicated section below for breakdown of subtasks)
+
   IMPROVEMENTS
 
     HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution
@@ -63,10 +66,22 @@
 
     HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh)
 
-    HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
-
     HADOOP-8308. Support cross-project Jenkins builds. (tomwhite)
 
+    HADOOP-8297. Writable javadocs don't carry default constructor (harsh)
+
+    HADOOP-8360. empty-configuration.xml fails xml validation
+    (Radim Kolar via harsh)
+
+    HADOOP-8367 Improve documentation of declaringClassProtocolName in rpc headers 
+                (Sanjay Radia)
+
+    HADOOP-8415. Add getDouble() and setDouble() in
+    org.apache.hadoop.conf.Configuration (Jan van der Lugt via harsh)
+
+    HADOOP-7659. fs -getmerge isn't guaranteed to work well over non-HDFS
+    filesystems (harsh)
+
   BUG FIXES
 
     HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -135,19 +150,129 @@
     HADOOP-8375. test-patch should stop immediately once it has found
     compilation errors (bobby)
 
+    HADOOP-8395. Text shell command unnecessarily demands that a
+    SequenceFile's key class be WritableComparable (harsh)
+
+    HADOOP-8413. test-patch.sh gives out the wrong links for
+    newPatchFindbugsWarnings (Colin Patrick McCabe via bobby)
+
+    HADOOP-6871. When the value of a configuration key is set to its
+    unresolved form, it causes the IllegalStateException in
+    Configuration.get() stating that substitution depth is too large.
+    (Arvind Prabhakar via harsh)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
 
-Release 2.0.0 - UNRELEASED 
+  BREAKDOWN OF HDFS-3042 SUBTASKS
+
+    HADOOP-8220. ZKFailoverController doesn't handle failure to become active
+    correctly (todd)
+    
+    HADOOP-8228. Auto HA: Refactor tests and add stress tests. (todd)
+    
+    HADOOP-8215. Security support for ZK Failover controller (todd)
+    
+    HADOOP-8245. Fix flakiness in TestZKFailoverController (todd)
+    
+    HADOOP-8257. TestZKFailoverControllerStress occasionally fails with Mockito
+    error (todd)
+    
+    HADOOP-8260. Replace ClientBaseWithFixes with our own modified copy of the
+    class (todd)
+    
+    HADOOP-8246. Auto-HA: automatically scope znode by nameservice ID (todd)
+    
+    HADOOP-8247. Add a config to enable auto-HA, which disables manual
+    FailoverController (todd)
+    
+    HADOOP-8306. ZKFC: improve error message when ZK is not running. (todd)
+    
+    HADOOP-8279. Allow manual failover to be invoked when auto-failover is
+    enabled. (todd)
+    
+    HADOOP-8276. Auto-HA: add config for java options to pass to zkfc daemon
+    (todd via eli)
+    
+    HADOOP-8405. ZKFC tests leak ZK instances. (todd)
+
+Release 2.0.1-alpha - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+    HADOOP-8388. Remove unused BlockLocation serialization.
+    (Colin Patrick McCabe via eli)
+
+  NEW FEATURES
+ 
+  IMPROVEMENTS
+
+    HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual
+    final release. (todd)
+
+    HADOOP-8361. Avoid out-of-memory problems when deserializing strings.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8224. Don't hardcode hdfs.audit.logger in the scripts.
+    (Tomohiko Kinebuchi via eli)
+
+    HADOOP-8398. Cleanup BlockLocation. (eli)
+
+    HADOOP-8422. Deprecate FileSystem#getDefault* and getServerDefault
+    methods that don't take a Path argument. (eli)
+
+    HADOOP-8323. Add javadoc and tests for Text.clear() behavior (harsh)
+
+    HADOOP-8358. Config-related WARN for dfs.web.ugi can be avoided. (harsh)
+
+    HADOOP-8450. Remove src/test/system. (eli)
+
+  BUG FIXES
+
+    HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
+    starting with a numeric character. (Junping Du via suresh)
+
+    HADOOP-8393. hadoop-config.sh missing variable exports, causes Yarn jobs to fail with ClassNotFoundException MRAppMaster. (phunt via tucu)
+
+    HADOOP-8316. Audit logging should be disabled by default. (eli)
+
+    HADOOP-8400. All commands warn "Kerberos krb5 configuration not found" when security is not enabled. (tucu)
+
+    HADOOP-8406. CompressionCodecFactory.CODEC_PROVIDERS iteration is
+    thread-unsafe (todd)
+
+    HADOOP-8287. etc/hadoop is missing hadoop-env.sh (eli)
+
+    HADOOP-8408. MR doesn't work with a non-default ViewFS mount table
+    and security enabled. (atm via eli)
+
+    HADOOP-8329. Build fails with Java 7. (eli)
+
+    HADOOP-8268. A few pom.xml across Hadoop project
+    may fail XML validation. (Radim Kolar via harsh)
+
+    HADOOP-8444. Fix the tests FSMainOperationsBaseTest.java and
+    FileContextMainOperationsBaseTest.java to avoid potential
+    test failure (Madhukara Phatak via harsh)
+
+    HADOOP-8452. DN logs backtrace when running under jsvc and /jmx is loaded 
+    (Andy Isaacson via bobby)
+
+    HADOOP-8460. Document proper setting of HADOOP_PID_DIR and 
+    HADOOP_SECURE_DN_PID_DIR (bobby)
+
+    HADOOP-8466. hadoop-client POM incorrectly excludes avro. (bmahe via tucu)
+
+    HADOOP-8481. update BUILDING.txt to talk about cmake rather than autotools.
+    (Colin Patrick McCabe via eli)
+
+Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES
 
     HADOOP-7920. Remove Avro Rpc. (suresh)
 
-    HADOOP-8388. Remove unused BlockLocation serialization.
-    (Colin Patrick McCabe via eli)
-
   NEW FEATURES
 
     HADOOP-7773. Add support for protocol buffer based RPC engine.
@@ -305,11 +430,15 @@
     HADOOP-8356. FileSystem service loading mechanism should print the FileSystem 
     impl it is failing to load (tucu)
 
-    HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual
-    final release. (todd)
+    HADOOP-8353. hadoop-daemon.sh and yarn-daemon.sh can be misleading on stop.
+    (Roman Shaposhnik via atm)
 
-    HADOOP-8361. Avoid out-of-memory problems when deserializing strings.
-    (Colin Patrick McCabe via eli)
+    HADOOP-8113. Correction to BUILDING.txt: HDFS needs ProtocolBuffer, too
+    (not just MapReduce). Contributed by Eugene Koontz.
+
+    HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
+
+    HADOOP-8366 Use ProtoBuf for RpcResponseHeader (sanjay radia)
 
   OPTIMIZATIONS
 
@@ -444,9 +573,6 @@
     HADOOP-8359. Fix javadoc warnings in Configuration.  (Anupam Seth via
     szetszwo)
 
-    HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
-    starting with a numeric character. (Junping Du via suresh)
-
   BREAKDOWN OF HADOOP-7454 SUBTASKS
 
     HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index 44092c0..c1c3cd2 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -290,5 +290,9 @@
       <!-- protobuf generated code -->
       <Class name="~org\.apache\.hadoop\.ha\.proto\.HAServiceProtocolProtos.*"/>
     </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ha\.proto\.ZKFCProtocolProtos.*"/>
+    </Match>
 
  </FindBugsFilter>
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index ca505b4..a36b74d 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
@@ -340,7 +343,7 @@
                 <echo file="target/compile-proto.sh">
                     PROTO_DIR=src/main/proto
                     JAVA_DIR=target/generated-sources/java
-                    which cygpath 2> /dev/null
+                    which cygpath 2&gt; /dev/null
                     if [ $? = 1 ]; then
                       IS_WIN=false
                     else
@@ -348,8 +351,8 @@
                       WIN_PROTO_DIR=`cygpath --windows $PROTO_DIR`
                       WIN_JAVA_DIR=`cygpath --windows $JAVA_DIR`
                     fi
-                    mkdir -p $JAVA_DIR 2> /dev/null
-                    for PROTO_FILE in `ls $PROTO_DIR/*.proto 2> /dev/null`
+                    mkdir -p $JAVA_DIR 2&gt; /dev/null
+                    for PROTO_FILE in `ls $PROTO_DIR/*.proto 2&gt; /dev/null`
                     do
                         if [ "$IS_WIN" = "true" ]; then
                           protoc -I$WIN_PROTO_DIR --java_out=$WIN_JAVA_DIR $PROTO_FILE
@@ -375,7 +378,7 @@
                 <echo file="target/compile-test-proto.sh">
                     PROTO_DIR=src/test/proto
                     JAVA_DIR=target/generated-test-sources/java
-                    which cygpath 2> /dev/null
+                    which cygpath 2&gt; /dev/null
                     if [ $? = 1 ]; then
                       IS_WIN=false
                     else
@@ -383,8 +386,8 @@
                       WIN_PROTO_DIR=`cygpath --windows $PROTO_DIR`
                       WIN_JAVA_DIR=`cygpath --windows $JAVA_DIR`
                     fi
-                    mkdir -p $JAVA_DIR 2> /dev/null
-                    for PROTO_FILE in `ls $PROTO_DIR/*.proto 2> /dev/null`
+                    mkdir -p $JAVA_DIR 2&gt; /dev/null
+                    for PROTO_FILE in `ls $PROTO_DIR/*.proto 2&gt; /dev/null`
                     do
                         if [ "$IS_WIN" = "true" ]; then
                           protoc -I$WIN_PROTO_DIR --java_out=$WIN_JAVA_DIR $PROTO_FILE
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
index 6b1dd2e..aa971f9 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
@@ -172,7 +172,7 @@
 
 if [ "$HADOOP_COMMON_HOME" = "" ]; then
   if [ -d "${HADOOP_PREFIX}/$HADOOP_COMMON_DIR" ]; then
-    HADOOP_COMMON_HOME=$HADOOP_PREFIX
+    export HADOOP_COMMON_HOME=$HADOOP_PREFIX
   fi
 fi
 
@@ -252,7 +252,7 @@
 # put hdfs in classpath if present
 if [ "$HADOOP_HDFS_HOME" = "" ]; then
   if [ -d "${HADOOP_PREFIX}/$HDFS_DIR" ]; then
-    HADOOP_HDFS_HOME=$HADOOP_PREFIX
+    export HADOOP_HDFS_HOME=$HADOOP_PREFIX
   fi
 fi
 
@@ -269,7 +269,7 @@
 # put yarn in classpath if present
 if [ "$YARN_HOME" = "" ]; then
   if [ -d "${HADOOP_PREFIX}/$YARN_DIR" ]; then
-    YARN_HOME=$HADOOP_PREFIX
+    export YARN_HOME=$HADOOP_PREFIX
   fi
 fi
 
@@ -286,7 +286,7 @@
 # put mapred in classpath if present AND different from YARN
 if [ "$HADOOP_MAPRED_HOME" = "" ]; then
   if [ -d "${HADOOP_PREFIX}/$MAPRED_DIR" ]; then
-    HADOOP_MAPRED_HOME=$HADOOP_PREFIX
+    export HADOOP_MAPRED_HOME=$HADOOP_PREFIX
   fi
 fi
 
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
index 1a4d644..9d19250 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
@@ -109,8 +109,10 @@
 export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
 export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
 export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
+export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"}
 log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
 pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
+HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
 
 # Set default scheduling priority
 if [ "$HADOOP_NICENESS" = "" ]; then
@@ -139,7 +141,7 @@
     echo starting $command, logging to $log
     cd "$HADOOP_PREFIX"
     case $command in
-      namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer)
+      namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer|zkfc)
         if [ -z "$HADOOP_HDFS_HOME" ]; then
           hdfsScript="$HADOOP_PREFIX"/bin/hdfs
         else
@@ -162,9 +164,15 @@
   (stop)
 
     if [ -f $pid ]; then
-      if kill -0 `cat $pid` > /dev/null 2>&1; then
+      TARGET_PID=`cat $pid`
+      if kill -0 $TARGET_PID > /dev/null 2>&1; then
         echo stopping $command
-        kill `cat $pid`
+        kill $TARGET_PID
+        sleep $HADOOP_STOP_TIMEOUT
+        if kill -0 $TARGET_PID > /dev/null 2>&1; then
+          echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9"
+          kill -9 $TARGET_PID
+        fi
       else
         echo no $command to stop
       fi
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
new file mode 100644
index 0000000..33abeca
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -0,0 +1,77 @@
+# Copyright 2011 The Apache Software Foundation
+# 
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.
+export JAVA_HOME=${JAVA_HOME}
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes.
+#export JSVC_HOME=${JSVC_HOME}
+
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+  if [ "$HADOOP_CLASSPATH" ]; then
+    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+  else
+    export HADOOP_CLASSPATH=$f
+  fi
+done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
+export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by 
+#       the user that will run the hadoop daemons.  Otherwise there is the
+#       potential for a symlink attack.
+export HADOOP_PID_DIR=${HADOOP_PID_DIR}
+export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 3470b3e..63e27cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -102,7 +102,7 @@
 #
 #Security appender
 #
-hadoop.security.logger=INFO,console
+hadoop.security.logger=INFO,NullAppender
 hadoop.security.log.maxfilesize=256MB
 hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
@@ -126,7 +126,7 @@
 #
 # hdfs audit logging
 #
-hdfs.audit.logger=INFO,console
+hdfs.audit.logger=INFO,NullAppender
 hdfs.audit.log.maxfilesize=256MB
 hdfs.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
@@ -141,7 +141,7 @@
 #
 # mapred audit logging
 #
-mapred.audit.logger=INFO,console
+mapred.audit.logger=INFO,NullAppender
 mapred.audit.log.maxfilesize=256MB
 mapred.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index d1ef7a4..917f97c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -617,7 +617,13 @@
     }
     Matcher match = varPat.matcher("");
     String eval = expr;
+    Set<String> evalSet = new HashSet<String>();
     for(int s=0; s<MAX_SUBST; s++) {
+      if (evalSet.contains(eval)) {
+        // Cyclic resolution pattern detected. Return current expression.
+        return eval;
+      }
+      evalSet.add(eval);
       match.reset(eval);
       if (!match.find()) {
         return eval;
@@ -917,6 +923,7 @@
       return defaultValue;
     return Float.parseFloat(valueString);
   }
+
   /**
    * Set the value of the <code>name</code> property to a <code>float</code>.
    * 
@@ -926,6 +933,35 @@
   public void setFloat(String name, float value) {
     set(name,Float.toString(value));
   }
+
+  /** 
+   * Get the value of the <code>name</code> property as a <code>double</code>.  
+   * If no such property exists, the provided default value is returned,
+   * or if the specified value is not a valid <code>double</code>,
+   * then an error is thrown.
+   *
+   * @param name property name.
+   * @param defaultValue default value.
+   * @throws NumberFormatException when the value is invalid
+   * @return property value as a <code>double</code>, 
+   *         or <code>defaultValue</code>. 
+   */
+  public double getDouble(String name, double defaultValue) {
+    String valueString = getTrimmed(name);
+    if (valueString == null)
+      return defaultValue;
+    return Double.parseDouble(valueString);
+  }
+
+  /**
+   * Set the value of the <code>name</code> property to a <code>double</code>.
+   * 
+   * @param name property name.
+   * @param value property value.
+   */
+  public void setDouble(String name, double value) {
+    set(name,Double.toString(value));
+  }
  
   /** 
    * Get the value of the <code>name</code> property as a <code>boolean</code>.  
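For reference, a minimal sketch of how the two Configuration changes above behave, assuming made-up property names (example.*); getDouble()/setDouble() come from HADOOP-8415 and the cycle handling from HADOOP-6871:

    import org.apache.hadoop.conf.Configuration;

    public class ConfigurationDoubleExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);   // skip default resources

        // HADOOP-8415: typed double accessors.
        conf.setDouble("example.sampling.rate", 0.25);            // made-up key
        double rate = conf.getDouble("example.sampling.rate", 1.0);
        double fallback = conf.getDouble("example.not.set", 1.0); // returns 1.0

        // HADOOP-6871: a cyclic value no longer triggers the
        // "substitution depth too large" IllegalStateException; resolution
        // stops when the cycle is detected and the unresolved form is returned.
        conf.set("example.a", "${example.b}");
        conf.set("example.b", "${example.a}");
        String unresolved = conf.get("example.a");

        System.out.println(rate + " " + fallback + " " + unresolved);
      }
    }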
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index 46989f2..cfe9ee8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -17,29 +17,23 @@
  */
 package org.apache.hadoop.fs;
 
-import java.io.DataInput;
-import java.io.DataOutput;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
-/*
- * A BlockLocation lists hosts, offset and length
- * of block. 
- * 
+/**
+ * Represents the network location of a block, information about the hosts
+ * that contain block replicas, and other block metadata (E.g. the file
+ * offset associated with the block, length, whether it is corrupt, etc).
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class BlockLocation {
-  private String[] hosts; //hostnames of datanodes
-  private String[] names; //hostname:portNumber of datanodes
-  private String[] topologyPaths; // full path name in network topology
-  private long offset;  //offset of the of the block in the file
+  private String[] hosts; // Datanode hostnames
+  private String[] names; // Datanode IP:xferPort for accessing the block
+  private String[] topologyPaths; // Full path name in network topology
+  private long offset;  // Offset of the block in the file
   private long length;
   private boolean corrupt;
 
@@ -105,7 +99,7 @@
    * Get the list of hosts (hostname) hosting this block
    */
   public String[] getHosts() throws IOException {
-    if ((hosts == null) || (hosts.length == 0)) {
+    if (hosts == null || hosts.length == 0) {
       return new String[0];
     } else {
       return hosts;
@@ -113,25 +107,25 @@
   }
 
   /**
-   * Get the list of names (hostname:port) hosting this block
+   * Get the list of names (IP:xferPort) hosting this block
    */
   public String[] getNames() throws IOException {
-    if ((names == null) || (names.length == 0)) {
+    if (names == null || names.length == 0) {
       return new String[0];
     } else {
-      return this.names;
+      return names;
     }
   }
 
   /**
    * Get the list of network topology paths for each of the hosts.
-   * The last component of the path is the host.
+   * The last component of the path is the "name" (IP:xferPort).
    */
   public String[] getTopologyPaths() throws IOException {
-    if ((topologyPaths == null) || (topologyPaths.length == 0)) {
+    if (topologyPaths == null || topologyPaths.length == 0) {
       return new String[0];
     } else {
-      return this.topologyPaths;
+      return topologyPaths;
     }
   }
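As an aside on the clarified BlockLocation javadoc above, a short sketch of reading the hosts/names arrays; it assumes the long-standing FileSystem#getFileBlockLocations() API (not part of this patch) and a file path passed on the command line:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockLocationExample {
      public static void main(String[] args) throws IOException {
        Path file = new Path(args[0]);
        FileSystem fs = file.getFileSystem(new Configuration());
        FileStatus stat = fs.getFileStatus(file);

        for (BlockLocation loc : fs.getFileBlockLocations(stat, 0, stat.getLen())) {
          System.out.println("offset=" + loc.getOffset() + " length=" + loc.getLength());
          for (String host : loc.getHosts()) {   // datanode hostnames
            System.out.println("  host: " + host);
          }
          for (String name : loc.getNames()) {   // IP:xferPort, per the new javadoc
            System.out.println("  name: " + name);
          }
        }
      }
    }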
   
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 52cb1f3..29e4f13 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -20,6 +20,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.http.lib.StaticUserWebFilter;
 
 /** 
  * This class contains constants for configuration keys used
@@ -116,6 +117,8 @@
       "security.refresh.user.mappings.protocol.acl";
   public static final String 
   SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
+  public static final String 
+  SECURITY_ZKFC_PROTOCOL_ACL = "security.zkfc.protocol.acl";
   
   public static final String HADOOP_SECURITY_TOKEN_SERVICE_USE_IP =
       "hadoop.security.token.service.use_ip";
@@ -161,5 +164,12 @@
     "ha.failover-controller.cli-check.rpc-timeout.ms";
   public static final int HA_FC_CLI_CHECK_TIMEOUT_DEFAULT = 20000;
 
+  /** Static user web-filter properties.
+   * See {@link StaticUserWebFilter}.
+   */
+  public static final String HADOOP_HTTP_STATIC_USER =
+    "hadoop.http.staticuser.user";
+  public static final String DEFAULT_HADOOP_HTTP_STATIC_USER =
+    "dr.who";
 }
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index 4373124..252f37b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -44,6 +44,9 @@
  * else append to an existing file.</li>
  * <li> CREATE|OVERWRITE - to create a file if it does not exist, 
  * else overwrite an existing file.</li>
+ * <li> SYNC_BLOCK - to force closed blocks to the disk device.
+ * In addition {@link Syncable#hsync()} should be called after each write,
+ * if true synchronous behavior is required.</li>
  * </ol>
  * 
  * Following combination is not valid and will result in 
@@ -71,7 +74,12 @@
   /**
    * Append to a file. See javadoc for more description.
    */
-  APPEND((short) 0x04);
+  APPEND((short) 0x04),
+
+  /**
+   * Force closed blocks to disk. Similar to POSIX O_SYNC. See javadoc for description.
+   */
+  SYNC_BLOCK((short) 0x08);
 
   private final short mode;
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index b8879a2..1411130 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -615,7 +615,9 @@
    * Return a set of server default configuration values
    * @return server default configuration values
    * @throws IOException
+   * @deprecated use {@link #getServerDefaults(Path)} instead
    */
+  @Deprecated
   public FsServerDefaults getServerDefaults() throws IOException {
     Configuration conf = getConf();
     return new FsServerDefaults(getDefaultBlockSize(), 
@@ -828,6 +830,30 @@
       long blockSize,
       Progressable progress) throws IOException;
   
+  /**
+   * Create an FSDataOutputStream at the indicated Path with write-progress
+   * reporting.
+   * @param f the file name to open
+   * @param permission
+   * @param flags {@link CreateFlag}s to use for this stream.
+   * @param bufferSize the size of the buffer to be used.
+   * @param replication required block replication for the file.
+   * @param blockSize
+   * @param progress
+   * @throws IOException
+   * @see #setPermission(Path, FsPermission)
+   */
+  public FSDataOutputStream create(Path f,
+      FsPermission permission,
+      EnumSet<CreateFlag> flags,
+      int bufferSize,
+      short replication,
+      long blockSize,
+      Progressable progress) throws IOException {
+    // only DFS support this
+    return create(f, permission, flags.contains(CreateFlag.OVERWRITE), bufferSize, replication, blockSize, progress);
+  }
+  
   
   /*.
    * This create has been added to support the FileContext that processes
@@ -952,10 +978,35 @@
    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
        boolean overwrite, int bufferSize, short replication, long blockSize,
        Progressable progress) throws IOException {
-     throw new IOException("createNonRecursive unsupported for this filesystem "
-         + this.getClass());
+     return createNonRecursive(f, permission,
+         overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
+             : EnumSet.of(CreateFlag.CREATE), bufferSize,
+             replication, blockSize, progress);
    }
 
+   /**
+    * Opens an FSDataOutputStream at the indicated Path with write-progress
+    * reporting. Same as create(), except fails if parent directory doesn't
+    * already exist.
+    * @param f the file name to open
+    * @param permission
+    * @param flags {@link CreateFlag}s to use for this stream.
+    * @param bufferSize the size of the buffer to be used.
+    * @param replication required block replication for the file.
+    * @param blockSize
+    * @param progress
+    * @throws IOException
+    * @see #setPermission(Path, FsPermission)
+    * @deprecated API only for 0.20-append
+    */
+    @Deprecated
+    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+        EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
+        Progressable progress) throws IOException {
+      throw new IOException("createNonRecursive unsupported for this filesystem "
+          + this.getClass());
+    }
+
   /**
    * Creates the given Path as a brand-new zero-length file.  If
    * create fails, or if it already existed, return false.
@@ -1939,8 +1990,12 @@
     return getFileStatus(f).getBlockSize();
   }
 
-  /** Return the number of bytes that large input files should be optimally
-   * be split into to minimize i/o time. */
+  /**
+   * Return the number of bytes that large input files should be optimally
+   * be split into to minimize i/o time.
+   * @deprecated use {@link #getDefaultBlockSize(Path)} instead
+   */
+  @Deprecated
   public long getDefaultBlockSize() {
     // default to 32MB: large enough to minimize the impact of seeks
     return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024);
@@ -1958,7 +2013,9 @@
 
   /**
    * Get the default replication.
+   * @deprecated use {@link #getDefaultReplication(Path)} instead
    */
+  @Deprecated
   public short getDefaultReplication() { return 1; }
 
   /**
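A hedged usage sketch tying the new SYNC_BLOCK flag to the EnumSet-based FileSystem#create() overload added above; the path, FsPermission.getDefault(), hsync(), and the numeric parameters are illustrative assumptions, not values taken from this patch:

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class SyncBlockCreateExample {
      public static void main(String[] args) throws IOException {
        Path file = new Path("/tmp/sync-block-example");   // illustrative path
        FileSystem fs = file.getFileSystem(new Configuration());

        // CREATE|OVERWRITE plus the new SYNC_BLOCK flag. The buffer size,
        // replication factor and block size are example values only.
        EnumSet<CreateFlag> flags =
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK);
        FSDataOutputStream out = fs.create(file, FsPermission.getDefault(), flags,
            4096, (short) 3, fs.getDefaultBlockSize(file), null);
        try {
          out.write("hello".getBytes("UTF-8"));
          out.hsync();   // per the CreateFlag javadoc, hsync() after writes is
                         // still needed for truly synchronous behavior
        } finally {
          out.close();
        }
      }
    }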
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 5028479..58492e1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -307,6 +307,12 @@
     return FileUtil.fullyDelete(f);
   }
  
+  /**
+   * {@inheritDoc}
+   *
+   * (<b>Note</b>: Returned list is not sorted in any given order,
+   * due to reliance on Java's {@link File#list()} API.)
+   */
   public FileStatus[] listStatus(Path f) throws IOException {
     File localf = pathToFile(f);
     FileStatus[] results;
@@ -316,7 +322,7 @@
     }
     if (localf.isFile()) {
       return new FileStatus[] {
-        new RawLocalFileStatus(localf, getDefaultBlockSize(), this) };
+        new RawLocalFileStatus(localf, getDefaultBlockSize(f), this) };
     }
 
     String[] names = localf.list();
@@ -444,7 +450,7 @@
   public FileStatus getFileStatus(Path f) throws IOException {
     File path = pathToFile(f);
     if (path.exists()) {
-      return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(), this);
+      return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(f), this);
     } else {
       throw new FileNotFoundException("File " + f + " does not exist");
     }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
index 8a05a55..5935863 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
@@ -34,7 +34,6 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -136,7 +135,7 @@
 
   protected class TextRecordInputStream extends InputStream {
     SequenceFile.Reader r;
-    WritableComparable<?> key;
+    Writable key;
     Writable val;
 
     DataInputBuffer inbuf;
@@ -148,7 +147,7 @@
       r = new SequenceFile.Reader(lconf, 
           SequenceFile.Reader.file(fpath));
       key = ReflectionUtils.newInstance(
-          r.getKeyClass().asSubclass(WritableComparable.class), lconf);
+          r.getKeyClass().asSubclass(Writable.class), lconf);
       val = ReflectionUtils.newInstance(
           r.getValueClass().asSubclass(Writable.class), lconf);
       inbuf = new DataInputBuffer();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 20932ee..36771e3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -233,6 +233,11 @@
       fsState.resolve(getUriPath(f), true);
     return res.isInternalDir() ? null : res.targetFileSystem.getHomeDirectory();
   }
+  
+  @Override
+  public String getCanonicalServiceName() {
+    return null;
+  }
 
   @Override
   public URI getUri() {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index ef05456..a4ed255 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.data.ACL;
 import org.apache.zookeeper.KeeperException;
@@ -81,9 +82,15 @@
    */
   public interface ActiveStandbyElectorCallback {
     /**
-     * This method is called when the app becomes the active leader
+     * This method is called when the app becomes the active leader.
+     * If the service fails to become active, it should throw
+     * ServiceFailedException. This will cause the elector to
+     * sleep for a short period, then re-join the election.
+     * 
+     * Callback implementations are expected to manage their own
+     * timeouts (e.g. when making an RPC to a remote node).
      */
-    void becomeActive();
+    void becomeActive() throws ServiceFailedException;
 
     /**
      * This method is called when the app becomes a standby
@@ -134,7 +141,8 @@
 
   public static final Log LOG = LogFactory.getLog(ActiveStandbyElector.class);
 
-  private static final int NUM_RETRIES = 3;
+  static int NUM_RETRIES = 3;
+  private static final int SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE = 1000;
 
   private static enum ConnectionState {
     DISCONNECTED, CONNECTED, TERMINATED
@@ -154,6 +162,7 @@
   private final String zkHostPort;
   private final int zkSessionTimeout;
   private final List<ACL> zkAcl;
+  private final List<ZKAuthInfo> zkAuthInfo;
   private byte[] appData;
   private final String zkLockFilePath;
   private final String zkBreadCrumbPath;
@@ -185,6 +194,8 @@
    *          znode under which to create the lock
    * @param acl
    *          ZooKeeper ACL's
+   * @param authInfo a list of authentication credentials to add to the
+   *                 ZK connection
    * @param app
    *          reference to callback interface object
    * @throws IOException
@@ -192,6 +203,7 @@
    */
   public ActiveStandbyElector(String zookeeperHostPorts,
       int zookeeperSessionTimeout, String parentZnodeName, List<ACL> acl,
+      List<ZKAuthInfo> authInfo,
       ActiveStandbyElectorCallback app) throws IOException,
       HadoopIllegalArgumentException {
     if (app == null || acl == null || parentZnodeName == null
@@ -201,6 +213,7 @@
     zkHostPort = zookeeperHostPorts;
     zkSessionTimeout = zookeeperSessionTimeout;
     zkAcl = acl;
+    zkAuthInfo = authInfo;
     appClient = app;
     znodeWorkingDir = parentZnodeName;
     zkLockFilePath = znodeWorkingDir + "/" + LOCK_FILENAME;
@@ -227,8 +240,6 @@
   public synchronized void joinElection(byte[] data)
       throws HadoopIllegalArgumentException {
     
-    LOG.debug("Attempting active election");
-
     if (data == null) {
       throw new HadoopIllegalArgumentException("data cannot be null");
     }
@@ -236,6 +247,7 @@
     appData = new byte[data.length];
     System.arraycopy(data, 0, appData, 0, data.length);
 
+    LOG.debug("Attempting active election for " + this);
     joinElectionInternal();
   }
   
@@ -259,6 +271,9 @@
    */
   public synchronized void ensureParentZNode()
       throws IOException, InterruptedException {
+    Preconditions.checkState(!wantToBeInElection,
+        "ensureParentZNode() may not be called while in the election");
+
     String pathParts[] = znodeWorkingDir.split("/");
     Preconditions.checkArgument(pathParts.length >= 1 &&
         "".equals(pathParts[0]),
@@ -292,6 +307,9 @@
    */
   public synchronized void clearParentZNode()
       throws IOException, InterruptedException {
+    Preconditions.checkState(!wantToBeInElection,
+        "clearParentZNode() may not be called while in the election");
+
     try {
       LOG.info("Recursively deleting " + znodeWorkingDir + " from ZK...");
 
@@ -360,7 +378,7 @@
         createConnection();
       }
       Stat stat = new Stat();
-      return zkClient.getData(zkLockFilePath, false, stat);
+      return getDataWithRetries(zkLockFilePath, false, stat);
     } catch(KeeperException e) {
       Code code = e.code();
       if (isNodeDoesNotExist(code)) {
@@ -380,13 +398,17 @@
       String name) {
     if (isStaleClient(ctx)) return;
     LOG.debug("CreateNode result: " + rc + " for path: " + path
-        + " connectionState: " + zkConnectionState);
+        + " connectionState: " + zkConnectionState +
+        "  for " + this);
 
     Code code = Code.get(rc);
     if (isSuccess(code)) {
       // we successfully created the znode. we are the leader. start monitoring
-      becomeActive();
-      monitorActiveStatus();
+      if (becomeActive()) {
+        monitorActiveStatus();
+      } else {
+        reJoinElectionAfterFailureToBecomeActive();
+      }
       return;
     }
 
@@ -433,8 +455,13 @@
   public synchronized void processResult(int rc, String path, Object ctx,
       Stat stat) {
     if (isStaleClient(ctx)) return;
+    
+    assert wantToBeInElection :
+        "Got a StatNode result after quitting election";
+    
     LOG.debug("StatNode result: " + rc + " for path: " + path
-        + " connectionState: " + zkConnectionState);
+        + " connectionState: " + zkConnectionState + " for " + this);
+        
 
     Code code = Code.get(rc);
     if (isSuccess(code)) {
@@ -442,7 +469,9 @@
       // creation was retried
       if (stat.getEphemeralOwner() == zkClient.getSessionId()) {
         // we own the lock znode. so we are the leader
-        becomeActive();
+        if (!becomeActive()) {
+          reJoinElectionAfterFailureToBecomeActive();
+        }
       } else {
         // we dont own the lock znode. so we are a standby.
         becomeStandby();
@@ -470,20 +499,37 @@
       }
       errorMessage = errorMessage
           + ". Not retrying further znode monitoring connection errors.";
+    } else if (isSessionExpired(code)) {
+      // This isn't fatal - the client Watcher will re-join the election
+      LOG.warn("Lock monitoring failed because session was lost");
+      return;
     }
 
     fatalError(errorMessage);
   }
 
   /**
-   * interface implementation of Zookeeper watch events (connection and node)
+   * We failed to become active. Re-join the election, but
+   * sleep for a few seconds after terminating our existing
+   * session, so that other nodes have a chance to become active.
+   * The failure to become active is already logged inside
+   * becomeActive().
+   */
+  private void reJoinElectionAfterFailureToBecomeActive() {
+    reJoinElection(SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE);
+  }
+
+  /**
+   * interface implementation of Zookeeper watch events (connection and node),
+   * proxied by {@link WatcherWithClientRef}.
    */
   synchronized void processWatchEvent(ZooKeeper zk, WatchedEvent event) {
     Event.EventType eventType = event.getType();
     if (isStaleClient(zk)) return;
     LOG.debug("Watcher event type: " + eventType + " with state:"
         + event.getState() + " for path:" + event.getPath()
-        + " connectionState: " + zkConnectionState);
+        + " connectionState: " + zkConnectionState
+        + " for " + this);
 
     if (eventType == Event.EventType.None) {
       // the connection state has changed
@@ -494,7 +540,8 @@
         // be undone
         ConnectionState prevConnectionState = zkConnectionState;
         zkConnectionState = ConnectionState.CONNECTED;
-        if (prevConnectionState == ConnectionState.DISCONNECTED) {
+        if (prevConnectionState == ConnectionState.DISCONNECTED &&
+            wantToBeInElection) {
           monitorActiveStatus();
         }
         break;
@@ -511,7 +558,7 @@
         // call listener to reconnect
         LOG.info("Session expired. Entering neutral mode and rejoining...");
         enterNeutralMode();
-        reJoinElection();
+        reJoinElection(0);
         break;
       default:
         fatalError("Unexpected Zookeeper watch event state: "
@@ -559,16 +606,21 @@
   protected synchronized ZooKeeper getNewZooKeeper() throws IOException {
     ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, null);
     zk.register(new WatcherWithClientRef(zk));
+    for (ZKAuthInfo auth : zkAuthInfo) {
+      zk.addAuthInfo(auth.getScheme(), auth.getAuth());
+    }
     return zk;
   }
 
   private void fatalError(String errorMessage) {
+    LOG.fatal(errorMessage);
     reset();
     appClient.notifyFatalError(errorMessage);
   }
 
   private void monitorActiveStatus() {
-    LOG.debug("Monitoring active leader");
+    assert wantToBeInElection;
+    LOG.debug("Monitoring active leader for " + this);
     statRetryCount = 0;
     monitorLockNodeAsync();
   }
@@ -586,7 +638,7 @@
     createLockNodeAsync();
   }
 
-  private void reJoinElection() {
+  private void reJoinElection(int sleepTime) {
     LOG.info("Trying to re-establish ZK session");
     
     // Some of the test cases rely on expiring the ZK sessions and
@@ -599,12 +651,30 @@
     sessionReestablishLockForTests.lock();
     try {
       terminateConnection();
+      sleepFor(sleepTime);
+      
       joinElectionInternal();
     } finally {
       sessionReestablishLockForTests.unlock();
     }
   }
-  
+
+  /**
+   * Sleep for the given number of milliseconds.
+   * This is non-static, and separated out, so that unit tests
+   * can override the behavior not to sleep.
+   */
+  @VisibleForTesting
+  protected void sleepFor(int sleepMs) {
+    if (sleepMs > 0) {
+      try {
+        Thread.sleep(sleepMs);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
   @VisibleForTesting
   void preventSessionReestablishmentForTests() {
     sessionReestablishLockForTests.lock();
@@ -616,8 +686,12 @@
   }
   
   @VisibleForTesting
-  long getZKSessionIdForTests() {
-    return zkClient.getSessionId();
+  synchronized long getZKSessionIdForTests() {
+    if (zkClient != null) {
+      return zkClient.getSessionId();
+    } else {
+      return -1;
+    }
   }
   
   @VisibleForTesting
@@ -629,17 +703,13 @@
     int connectionRetryCount = 0;
     boolean success = false;
     while(!success && connectionRetryCount < NUM_RETRIES) {
-      LOG.debug("Establishing zookeeper connection");
+      LOG.debug("Establishing zookeeper connection for " + this);
       try {
         createConnection();
         success = true;
       } catch(IOException e) {
         LOG.warn(e);
-        try {
-          Thread.sleep(5000);
-        } catch(InterruptedException e1) {
-          LOG.warn(e1);
-        }
+        sleepFor(5000);
       }
       ++connectionRetryCount;
     }
@@ -647,14 +717,24 @@
   }
 
   private void createConnection() throws IOException {
+    if (zkClient != null) {
+      try {
+        zkClient.close();
+      } catch (InterruptedException e) {
+        throw new IOException("Interrupted while closing ZK",
+            e);
+      }
+      zkClient = null;
+    }
     zkClient = getNewZooKeeper();
+    LOG.debug("Created new connection for " + this);
   }
   
-  private void terminateConnection() {
+  void terminateConnection() {
     if (zkClient == null) {
       return;
     }
-    LOG.debug("Terminating ZK connection");
+    LOG.debug("Terminating ZK connection for " + this);
     ZooKeeper tempZk = zkClient;
     zkClient = null;
     try {
@@ -670,20 +750,24 @@
     terminateConnection();
   }
 
-  private void becomeActive() {
+  private boolean becomeActive() {
     assert wantToBeInElection;
-    if (state != State.ACTIVE) {
-      try {
-        Stat oldBreadcrumbStat = fenceOldActive();
-        writeBreadCrumbNode(oldBreadcrumbStat);
-      } catch (Exception e) {
-        LOG.warn("Exception handling the winning of election", e);
-        reJoinElection();
-        return;
-      }
-      LOG.debug("Becoming active");
-      state = State.ACTIVE;
+    if (state == State.ACTIVE) {
+      // already active
+      return true;
+    }
+    try {
+      Stat oldBreadcrumbStat = fenceOldActive();
+      writeBreadCrumbNode(oldBreadcrumbStat);
+      
+      LOG.debug("Becoming active for " + this);
       appClient.becomeActive();
+      state = State.ACTIVE;
+      return true;
+    } catch (Exception e) {
+      LOG.warn("Exception handling the winning of election", e);
+      // Caller will handle quitting and rejoining the election.
+      return false;
     }
   }
 
@@ -779,7 +863,7 @@
 
   private void becomeStandby() {
     if (state != State.STANDBY) {
-      LOG.debug("Becoming standby");
+      LOG.debug("Becoming standby for " + this);
       state = State.STANDBY;
       appClient.becomeStandby();
     }
@@ -787,7 +871,7 @@
 
   private void enterNeutralMode() {
     if (state != State.NEUTRAL) {
-      LOG.debug("Entering neutral mode");
+      LOG.debug("Entering neutral mode for " + this);
       state = State.NEUTRAL;
       appClient.enterNeutralMode();
     }
@@ -814,6 +898,15 @@
     });
   }
 
+  private byte[] getDataWithRetries(final String path, final boolean watch,
+      final Stat stat) throws InterruptedException, KeeperException {
+    return zkDoWithRetries(new ZKAction<byte[]>() {
+      public byte[] run() throws KeeperException, InterruptedException {
+        return zkClient.getData(path, watch, stat);
+      }
+    });
+  }
+
   private Stat setDataWithRetries(final String path, final byte[] data,
       final int version) throws InterruptedException, KeeperException {
     return zkDoWithRetries(new ZKAction<Stat>() {
@@ -884,8 +977,14 @@
 
     @Override
     public void process(WatchedEvent event) {
-      ActiveStandbyElector.this.processWatchEvent(
-          zk, event);
+      try {
+        ActiveStandbyElector.this.processWatchEvent(
+            zk, event);
+      } catch (Throwable t) {
+        fatalError(
+            "Failed to process watcher event " + event + ": " +
+            StringUtils.stringifyException(t));
+      }
     }
   }
 
@@ -913,5 +1012,13 @@
     }
     return false;
   }
+  
+  @Override
+  public String toString() {
+    return "elector id=" + System.identityHashCode(this) +
+      " appData=" +
+      ((appData == null) ? "null" : StringUtils.byteToHexString(appData)) + 
+      " cb=" + appClient;
+  }
 
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
index 22f245a..b1d2c7e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
@@ -27,6 +27,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ipc.RPC;
 
 import com.google.common.base.Preconditions;
@@ -48,9 +50,12 @@
   
   private final Configuration conf;
 
+  private final RequestSource requestSource;
   
-  public FailoverController(Configuration conf) {
+  public FailoverController(Configuration conf,
+      RequestSource source) {
     this.conf = conf;
+    this.requestSource = source;
     
     this.gracefulFenceTimeout = getGracefulFenceTimeout(conf);
     this.rpcTimeoutToNewActive = getRpcTimeoutToNewActive(conf);
@@ -100,7 +105,7 @@
       toSvcStatus = toSvc.getServiceStatus();
     } catch (IOException e) {
       String msg = "Unable to get service state for " + target;
-      LOG.error(msg, e);
+      LOG.error(msg + ": " + e.getLocalizedMessage());
       throw new FailoverFailedException(msg, e);
     }
 
@@ -122,7 +127,7 @@
     }
 
     try {
-      HAServiceProtocolHelper.monitorHealth(toSvc);
+      HAServiceProtocolHelper.monitorHealth(toSvc, createReqInfo());
     } catch (HealthCheckFailedException hce) {
       throw new FailoverFailedException(
           "Can't failover to an unhealthy service", hce);
@@ -132,7 +137,10 @@
     }
   }
   
-  
+  private StateChangeRequestInfo createReqInfo() {
+    return new StateChangeRequestInfo(requestSource);
+  }
+
   /**
    * Try to get the HA state of the node at the given address. This
    * function is guaranteed to be "quick" -- ie it has a short timeout
@@ -143,7 +151,7 @@
     HAServiceProtocol proxy = null;
     try {
       proxy = svc.getProxy(conf, gracefulFenceTimeout);
-      proxy.transitionToStandby();
+      proxy.transitionToStandby(createReqInfo());
       return true;
     } catch (ServiceFailedException sfe) {
       LOG.warn("Unable to gracefully make " + svc + " standby (" +
@@ -198,7 +206,8 @@
     Throwable cause = null;
     try {
       HAServiceProtocolHelper.transitionToActive(
-          toSvc.getProxy(conf, rpcTimeoutToNewActive));
+          toSvc.getProxy(conf, rpcTimeoutToNewActive),
+          createReqInfo());
     } catch (ServiceFailedException sfe) {
       LOG.error("Unable to make " + toSvc + " active (" +
           sfe.getMessage() + "). Failing back.");
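FailoverController now takes the RequestSource at construction and stamps every transition RPC with a StateChangeRequestInfo built from it. A hedged sketch of driving a user-initiated failover through the new constructor; the two HAServiceTarget arguments are assumed to be resolved elsewhere, as HAAdmin does further below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.FailoverController;
import org.apache.hadoop.ha.FailoverFailedException;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceTarget;

public class ManualFailoverSketch {
  /** Illustration only: fail over between two targets as a user request. */
  static void failover(Configuration conf, HAServiceTarget fromNode,
      HAServiceTarget toNode) throws FailoverFailedException {
    FailoverController fc = new FailoverController(conf,
        RequestSource.REQUEST_BY_USER);
    // Every RPC issued below carries a StateChangeRequestInfo(REQUEST_BY_USER).
    fc.failover(fromNode, toNode, false /* forceFence */, false /* forceActive */);
  }
}
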
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index a3d898c..7d85c01 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -19,11 +19,11 @@
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.Arrays;
 import java.util.Map;
 
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
@@ -33,9 +33,12 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 
 /**
@@ -49,6 +52,13 @@
   
   private static final String FORCEFENCE  = "forcefence";
   private static final String FORCEACTIVE = "forceactive";
+  
+  /**
+   * Undocumented flag which allows an administrator to use manual failover
+   * state transitions even when auto-failover is enabled. This is an unsafe
+   * operation, which is why it is not documented in the usage below.
+   */
+  private static final String FORCEMANUAL = "forcemanual";
   private static final Log LOG = LogFactory.getLog(HAAdmin.class);
 
   private int rpcTimeoutForChecks = -1;
@@ -79,6 +89,7 @@
   /** Output stream for errors, for use in tests */
   protected PrintStream errOut = System.err;
   PrintStream out = System.out;
+  private RequestSource requestSource = RequestSource.REQUEST_BY_USER;
 
   protected abstract HAServiceTarget resolveTarget(String string);
 
@@ -106,63 +117,83 @@
     errOut.println("Usage: HAAdmin [" + cmd + " " + usage.args + "]");
   }
 
-  private int transitionToActive(final String[] argv)
+  private int transitionToActive(final CommandLine cmd)
       throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
+    String[] argv = cmd.getArgs();
+    if (argv.length != 1) {
       errOut.println("transitionToActive: incorrect number of arguments");
       printUsage(errOut, "-transitionToActive");
       return -1;
     }
-    
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
+    HAServiceTarget target = resolveTarget(argv[0]);
+    if (!checkManualStateManagementOK(target)) {
+      return -1;
+    }
+    HAServiceProtocol proto = target.getProxy(
         getConf(), 0);
-    HAServiceProtocolHelper.transitionToActive(proto);
+    HAServiceProtocolHelper.transitionToActive(proto, createReqInfo());
     return 0;
   }
 
-  private int transitionToStandby(final String[] argv)
+  private int transitionToStandby(final CommandLine cmd)
       throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
+    String[] argv = cmd.getArgs();
+    if (argv.length != 1) {
       errOut.println("transitionToStandby: incorrect number of arguments");
       printUsage(errOut, "-transitionToStandby");
       return -1;
     }
     
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
-        getConf(), 0);
-    HAServiceProtocolHelper.transitionToStandby(proto);
-    return 0;
-  }
-
-  private int failover(final String[] argv)
-      throws IOException, ServiceFailedException {
-    boolean forceFence = false;
-    boolean forceActive = false;
-
-    Options failoverOpts = new Options();
-    // "-failover" isn't really an option but we need to add
-    // it to appease CommandLineParser
-    failoverOpts.addOption("failover", false, "failover");
-    failoverOpts.addOption(FORCEFENCE, false, "force fencing");
-    failoverOpts.addOption(FORCEACTIVE, false, "force failover");
-
-    CommandLineParser parser = new GnuParser();
-    CommandLine cmd;
-
-    try {
-      cmd = parser.parse(failoverOpts, argv);
-      forceFence = cmd.hasOption(FORCEFENCE);
-      forceActive = cmd.hasOption(FORCEACTIVE);
-    } catch (ParseException pe) {
-      errOut.println("failover: incorrect arguments");
-      printUsage(errOut, "-failover");
+    HAServiceTarget target = resolveTarget(argv[0]);
+    if (!checkManualStateManagementOK(target)) {
       return -1;
     }
-    
+    HAServiceProtocol proto = target.getProxy(
+        getConf(), 0);
+    HAServiceProtocolHelper.transitionToStandby(proto, createReqInfo());
+    return 0;
+  }
+  /**
+   * Ensure that we are allowed to manually manage the HA state of the target
+   * service. If automatic failover is configured, then the automatic
+   * failover controllers should be doing state management, and it is generally
+   * an error to use the HAAdmin command line to do so.
+   * 
+   * @param target the target to check
+   * @return true if manual state management is allowed
+   */
+  private boolean checkManualStateManagementOK(HAServiceTarget target) {
+    if (target.isAutoFailoverEnabled()) {
+      if (requestSource != RequestSource.REQUEST_BY_USER_FORCED) {
+        errOut.println(
+            "Automatic failover is enabled for " + target + "\n" +
+            "Refusing to manually manage HA state, since it may cause\n" +
+            "a split-brain scenario or other incorrect state.\n" +
+            "If you are very sure you know what you are doing, please \n" +
+            "specify the " + FORCEMANUAL + " flag.");
+        return false;
+      } else {
+        LOG.warn("Proceeding with manual HA state management even though\n" +
+            "automatic failover is enabled for " + target);
+        return true;
+      }
+    }
+    return true;
+  }
+
+  private StateChangeRequestInfo createReqInfo() {
+    return new StateChangeRequestInfo(requestSource);
+  }
+
+  private int failover(CommandLine cmd)
+      throws IOException, ServiceFailedException {
+    boolean forceFence = cmd.hasOption(FORCEFENCE);
+    boolean forceActive = cmd.hasOption(FORCEACTIVE);
+
     int numOpts = cmd.getOptions() == null ? 0 : cmd.getOptions().length;
     final String[] args = cmd.getArgs();
 
-    if (numOpts > 2 || args.length != 2) {
+    if (numOpts > 3 || args.length != 2) {
       errOut.println("failover: incorrect arguments");
       printUsage(errOut, "-failover");
       return -1;
@@ -171,7 +202,30 @@
     HAServiceTarget fromNode = resolveTarget(args[0]);
     HAServiceTarget toNode = resolveTarget(args[1]);
     
-    FailoverController fc = new FailoverController(getConf());
+    // Check that auto-failover is consistently configured for both nodes.
+    Preconditions.checkState(
+        fromNode.isAutoFailoverEnabled() ==
+          toNode.isAutoFailoverEnabled(),
+          "Inconsistent auto-failover configs between %s and %s!",
+          fromNode, toNode);
+    
+    if (fromNode.isAutoFailoverEnabled()) {
+      if (forceFence || forceActive) {
+        // -forceActive doesn't make sense with auto-HA, since, if the node
+        // is not healthy, then its ZKFC will immediately quit the election
+        // again the next time a health check runs.
+        //
+        // -forceFence doesn't seem to have any real use cases with auto-HA
+        // so it isn't implemented.
+        errOut.println(FORCEFENCE + " and " + FORCEACTIVE + " flags not " +
+            "supported with auto-failover enabled.");
+        return -1;
+      }
+      return gracefulFailoverThroughZKFCs(toNode);
+    }
+    
+    FailoverController fc = new FailoverController(getConf(),
+        requestSource);
     
     try {
       fc.failover(fromNode, toNode, forceFence, forceActive); 
@@ -182,19 +236,44 @@
     }
     return 0;
   }
+  
 
-  private int checkHealth(final String[] argv)
+  /**
+   * Initiate a graceful failover by talking to the target node's ZKFC.
+   * This sends an RPC to the ZKFC, which coordinates the failover.
+   * 
+   * @param toNode the node to fail over to
+   * @return status code (0 for success)
+   * @throws IOException if failover does not succeed
+   */
+  private int gracefulFailoverThroughZKFCs(HAServiceTarget toNode)
+      throws IOException {
+
+    int timeout = FailoverController.getRpcTimeoutToNewActive(getConf());
+    ZKFCProtocol proxy = toNode.getZKFCProxy(getConf(), timeout);
+    try {
+      proxy.gracefulFailover();
+      out.println("Failover to " + toNode + " successful");
+    } catch (ServiceFailedException sfe) {
+      errOut.println("Failover failed: " + sfe.getLocalizedMessage());
+      return -1;
+    }
+
+    return 0;
+  }
+
+  private int checkHealth(final CommandLine cmd)
       throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
+    String[] argv = cmd.getArgs();
+    if (argv.length != 1) {
       errOut.println("checkHealth: incorrect number of arguments");
       printUsage(errOut, "-checkHealth");
       return -1;
     }
-    
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
+    HAServiceProtocol proto = resolveTarget(argv[0]).getProxy(
         getConf(), rpcTimeoutForChecks);
     try {
-      HAServiceProtocolHelper.monitorHealth(proto);
+      HAServiceProtocolHelper.monitorHealth(proto, createReqInfo());
     } catch (HealthCheckFailedException e) {
       errOut.println("Health check failed: " + e.getLocalizedMessage());
       return -1;
@@ -202,15 +281,16 @@
     return 0;
   }
 
-  private int getServiceState(final String[] argv)
+  private int getServiceState(final CommandLine cmd)
       throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
+    String[] argv = cmd.getArgs();
+    if (argv.length != 1) {
       errOut.println("getServiceState: incorrect number of arguments");
       printUsage(errOut, "-getServiceState");
       return -1;
     }
 
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
+    HAServiceProtocol proto = resolveTarget(argv[0]).getProxy(
         getConf(), rpcTimeoutForChecks);
     out.println(proto.getServiceStatus().getState());
     return 0;
@@ -263,26 +343,101 @@
       printUsage(errOut);
       return -1;
     }
-
-    if ("-transitionToActive".equals(cmd)) {
-      return transitionToActive(argv);
-    } else if ("-transitionToStandby".equals(cmd)) {
-      return transitionToStandby(argv);
-    } else if ("-failover".equals(cmd)) {
-      return failover(argv);
-    } else if ("-getServiceState".equals(cmd)) {
-      return getServiceState(argv);
-    } else if ("-checkHealth".equals(cmd)) {
-      return checkHealth(argv);
-    } else if ("-help".equals(cmd)) {
-      return help(argv);
-    } else {
+    
+    if (!USAGE.containsKey(cmd)) {
       errOut.println(cmd.substring(1) + ": Unknown command");
       printUsage(errOut);
       return -1;
+    }
+    
+    Options opts = new Options();
+
+    // Add command-specific options
+    if ("-failover".equals(cmd)) {
+      addFailoverCliOpts(opts);
+    }
+    // Mutative commands take FORCEMANUAL option
+    if ("-transitionToActive".equals(cmd) ||
+        "-transitionToStandby".equals(cmd) ||
+        "-failover".equals(cmd)) {
+      opts.addOption(FORCEMANUAL, false,
+          "force manual control even if auto-failover is enabled");
+    }
+         
+    CommandLine cmdLine = parseOpts(cmd, opts, argv);
+    if (cmdLine == null) {
+      // error already printed
+      return -1;
+    }
+    
+    if (cmdLine.hasOption(FORCEMANUAL)) {
+      if (!confirmForceManual()) {
+        LOG.fatal("Aborted");
+        return -1;
+      }
+      // Instruct the NNs to honor this request even if they're
+      // configured for automatic failover.
+      requestSource = RequestSource.REQUEST_BY_USER_FORCED;
+    }
+
+    if ("-transitionToActive".equals(cmd)) {
+      return transitionToActive(cmdLine);
+    } else if ("-transitionToStandby".equals(cmd)) {
+      return transitionToStandby(cmdLine);
+    } else if ("-failover".equals(cmd)) {
+      return failover(cmdLine);
+    } else if ("-getServiceState".equals(cmd)) {
+      return getServiceState(cmdLine);
+    } else if ("-checkHealth".equals(cmd)) {
+      return checkHealth(cmdLine);
+    } else if ("-help".equals(cmd)) {
+      return help(argv);
+    } else {
+      // we already checked command validity above, so getting here
+      // would be a coding error
+      throw new AssertionError("Should not get here, command: " + cmd);
     } 
   }
   
+  private boolean confirmForceManual() throws IOException {
+     return ToolRunner.confirmPrompt(
+        "You have specified the " + FORCEMANUAL + " flag. This flag is " +
+        "dangerous, as it can induce a split-brain scenario that WILL " +
+        "CORRUPT your HDFS namespace, possibly irrecoverably.\n" +
+        "\n" +
+        "It is recommended not to use this flag, but instead to shut down the " +
+        "cluster and disable automatic failover if you prefer to manually " +
+        "manage your HA state.\n" +
+        "\n" +
+        "You may abort safely by answering 'n' or hitting ^C now.\n" +
+        "\n" +
+        "Are you sure you want to continue?");
+  }
+
+  /**
+   * Add CLI options which are specific to the failover command and no
+   * others.
+   */
+  private void addFailoverCliOpts(Options failoverOpts) {
+    failoverOpts.addOption(FORCEFENCE, false, "force fencing");
+    failoverOpts.addOption(FORCEACTIVE, false, "force failover");
+    // Don't add FORCEMANUAL, since that's added separately for all commands
+    // that change state.
+  }
+  
+  private CommandLine parseOpts(String cmdName, Options opts, String[] argv) {
+    try {
+      // Strip off the first arg, since that's just the command name
+      argv = Arrays.copyOfRange(argv, 1, argv.length); 
+      return new GnuParser().parse(opts, argv);
+    } catch (ParseException pe) {
+      errOut.println(cmdName.substring(1) +
+          ": incorrect arguments");
+      printUsage(errOut, cmdName);
+      return null;
+    }
+  }
+  
   private int help(String[] argv) {
     if (argv.length != 2) {
       printUsage(errOut, "-help");
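HAAdmin now builds a per-command commons-cli Options set and hands each subcommand a parsed CommandLine instead of raw argv. A standalone sketch of how the -failover flags parse, mirroring addFailoverCliOpts()/parseOpts() above; the sample argv is illustrative.

import java.util.Arrays;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class FailoverArgsSketch {
  public static void main(String[] args) throws ParseException {
    // Option names match the FORCEFENCE/FORCEACTIVE/FORCEMANUAL constants above.
    Options opts = new Options();
    opts.addOption("forcefence", false, "force fencing");
    opts.addOption("forceactive", false, "force failover");
    opts.addOption("forcemanual", false,
        "force manual control even if auto-failover is enabled");

    // What HAAdmin would see after stripping the leading "-failover" token:
    String[] argv = { "-forcefence", "nn1", "nn2" };
    CommandLine cmd = new GnuParser().parse(opts, argv);

    System.out.println("forceFence = " + cmd.hasOption("forcefence")); // true
    System.out.println("targets    = " + Arrays.toString(cmd.getArgs())); // [nn1, nn2]
  }
}
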
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
index b086382..d4ae089 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
@@ -60,6 +60,31 @@
       return name;
     }
   }
+  
+  public static enum RequestSource {
+    REQUEST_BY_USER,
+    REQUEST_BY_USER_FORCED,
+    REQUEST_BY_ZKFC;
+  }
+  
+  /**
+   * Information describing the source for a request to change state.
+   * This is used to differentiate requests from automatic vs CLI
+   * failover controllers, and in the future may include epoch
+   * information.
+   */
+  public static class StateChangeRequestInfo {
+    private final RequestSource source;
+
+    public StateChangeRequestInfo(RequestSource source) {
+      super();
+      this.source = source;
+    }
+
+    public RequestSource getSource() {
+      return source;
+    }
+  }
 
   /**
    * Monitor the health of the service. This is periodically called by the HA
@@ -95,7 +120,8 @@
    * @throws IOException
    *           if other errors happen
    */
-  public void transitionToActive() throws ServiceFailedException,
+  public void transitionToActive(StateChangeRequestInfo reqInfo)
+                                   throws ServiceFailedException,
                                           AccessControlException,
                                           IOException;
 
@@ -110,7 +136,8 @@
    * @throws IOException
    *           if other errors happen
    */
-  public void transitionToStandby() throws ServiceFailedException,
+  public void transitionToStandby(StateChangeRequestInfo reqInfo)
+                                    throws ServiceFailedException,
                                            AccessControlException,
                                            IOException;
 
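StateChangeRequestInfo lets a service tell operator-driven transitions apart from ZKFC-driven ones. The enforcement itself lives in the HA service implementations, which are not part of this patch; the following is only an illustration, under that assumption, of how the source field could be consumed.

import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.security.AccessControlException;

public class RequestSourceCheckSketch {
  /**
   * Illustration only: with automatic failover enabled, accept a manual
   * transition only if it came from the ZKFC or was explicitly forced
   * with -forcemanual (REQUEST_BY_USER_FORCED).
   */
  static void checkRequestAllowed(boolean autoFailoverEnabled,
      StateChangeRequestInfo reqInfo) throws AccessControlException {
    if (!autoFailoverEnabled) {
      return;
    }
    RequestSource source = reqInfo.getSource();
    if (source == RequestSource.REQUEST_BY_ZKFC ||
        source == RequestSource.REQUEST_BY_USER_FORCED) {
      return;
    }
    throw new AccessControlException(
        "Manual HA state management is disallowed while automatic " +
        "failover is enabled");
  }
}
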
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
index b8ee717..58d4a7f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
@@ -21,6 +21,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ipc.RemoteException;
 
 /**
@@ -30,7 +31,8 @@
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class HAServiceProtocolHelper {
-  public static void monitorHealth(HAServiceProtocol svc)
+  public static void monitorHealth(HAServiceProtocol svc,
+      StateChangeRequestInfo reqInfo)
       throws IOException {
     try {
       svc.monitorHealth();
@@ -39,19 +41,21 @@
     }
   }
 
-  public static void transitionToActive(HAServiceProtocol svc)
+  public static void transitionToActive(HAServiceProtocol svc,
+      StateChangeRequestInfo reqInfo)
       throws IOException {
     try {
-      svc.transitionToActive();
+      svc.transitionToActive(reqInfo);
     } catch (RemoteException e) {
       throw e.unwrapRemoteException(ServiceFailedException.class);
     }
   }
 
-  public static void transitionToStandby(HAServiceProtocol svc)
+  public static void transitionToStandby(HAServiceProtocol svc,
+      StateChangeRequestInfo reqInfo)
       throws IOException {
     try {
-      svc.transitionToStandby();
+      svc.transitionToStandby(reqInfo);
     } catch (RemoteException e) {
       throw e.unwrapRemoteException(ServiceFailedException.class);
     }
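The helper overloads now thread the request info through while still unwrapping ServiceFailedException from RemoteException. A small usage sketch; the target and the zero timeout are assumptions.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceProtocolHelper;
import org.apache.hadoop.ha.HAServiceTarget;

public class HelperUsageSketch {
  /** Illustration only: request an active transition tagged as user-initiated. */
  static void makeActive(HAServiceTarget target, Configuration conf)
      throws IOException {
    HAServiceProtocol proxy = target.getProxy(conf, 0 /* default rpc timeout */);
    HAServiceProtocolHelper.transitionToActive(proxy,
        new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
    // A ServiceFailedException wrapped in a RemoteException is unwrapped
    // by the helper before it reaches the caller.
  }
}
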
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
index 00edfa0..56678b4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ha.protocolPB.ZKFCProtocolClientSideTranslatorPB;
 import org.apache.hadoop.net.NetUtils;
 
 import com.google.common.collect.Maps;
@@ -49,6 +50,11 @@
   public abstract InetSocketAddress getAddress();
 
   /**
+   * @return the IPC address of the ZKFC on the target node
+   */
+  public abstract InetSocketAddress getZKFCAddress();
+
+  /**
    * @return a Fencer implementation configured for this target node
    */
   public abstract NodeFencer getFencer();
@@ -76,6 +82,20 @@
         confCopy, factory, timeoutMs);
   }
   
+  /**
+   * @return a proxy to the ZKFC which is associated with this HA service.
+   */
+  public ZKFCProtocol getZKFCProxy(Configuration conf, int timeoutMs)
+      throws IOException {
+    Configuration confCopy = new Configuration(conf);
+    // Lower the timeout so we quickly fail to connect
+    confCopy.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
+    SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
+    return new ZKFCProtocolClientSideTranslatorPB(
+        getZKFCAddress(),
+        confCopy, factory, timeoutMs);
+  }
+  
   public final Map<String, String> getFencingParameters() {
     Map<String, String> ret = Maps.newHashMap();
     addFencingParameters(ret);
@@ -99,4 +119,11 @@
     ret.put(HOST_SUBST_KEY, getAddress().getHostName());
     ret.put(PORT_SUBST_KEY, String.valueOf(getAddress().getPort()));
   }
+
+  /**
+   * @return true if auto failover should be considered enabled
+   */
+  public boolean isAutoFailoverEnabled() {
+    return false;
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAZKUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAZKUtil.java
new file mode 100644
index 0000000..093b878
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAZKUtil.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
+import com.google.common.io.Files;
+
+/**
+ * Utilities for working with ZooKeeper.
+ */
+@InterfaceAudience.Private
+public class HAZKUtil {
+  
+  /**
+   * Parse ACL permission string, partially borrowed from
+   * ZooKeeperMain private method
+   */
+  private static int getPermFromString(String permString) {
+    int perm = 0;
+    for (int i = 0; i < permString.length(); i++) {
+      char c = permString.charAt(i); 
+      switch (c) {
+      case 'r':
+        perm |= ZooDefs.Perms.READ;
+        break;
+      case 'w':
+        perm |= ZooDefs.Perms.WRITE;
+        break;
+      case 'c':
+        perm |= ZooDefs.Perms.CREATE;
+        break;
+      case 'd':
+        perm |= ZooDefs.Perms.DELETE;
+        break;
+      case 'a':
+        perm |= ZooDefs.Perms.ADMIN;
+        break;
+      default:
+        throw new BadAclFormatException(
+            "Invalid permission '" + c + "' in permission string '" +
+            permString + "'");
+      }
+    }
+    return perm;
+  }
+
+  /**
+   * Parse a comma-separated list of ACL entries used to secure generated nodes, e.g.
+   * <code>sasl:hdfs/host1@MY.DOMAIN:cdrwa,sasl:hdfs/host2@MY.DOMAIN:cdrwa</code>
+   *
+   * @return ACL list
+   * @throws HadoopIllegalArgumentException if an ACL is invalid
+   */
+  public static List<ACL> parseACLs(String aclString) {
+    List<ACL> acl = Lists.newArrayList();
+    if (aclString == null) {
+      return acl;
+    }
+    
+    List<String> aclComps = Lists.newArrayList(
+        Splitter.on(',').omitEmptyStrings().trimResults()
+        .split(aclString));
+    for (String a : aclComps) {
+      // from ZooKeeperMain private method
+      int firstColon = a.indexOf(':');
+      int lastColon = a.lastIndexOf(':');
+      if (firstColon == -1 || lastColon == -1 || firstColon == lastColon) {
+        throw new BadAclFormatException(
+            "ACL '" + a + "' not of expected form scheme:id:perm");
+      }
+
+      ACL newAcl = new ACL();
+      newAcl.setId(new Id(a.substring(0, firstColon), a.substring(
+          firstColon + 1, lastColon)));
+      newAcl.setPerms(getPermFromString(a.substring(lastColon + 1)));
+      acl.add(newAcl);
+    }
+    
+    return acl;
+  }
+  
+  /**
+   * Parse a comma-separated list of authentication mechanisms. Each
+   * such mechanism should be of the form 'scheme:auth' -- the same
+   * syntax used for the 'addAuth' command in the ZK CLI.
+   * 
+   * @param authString the comma-separated auth mechanisms
+   * @return a list of parsed authentications
+   */
+  public static List<ZKAuthInfo> parseAuth(String authString) {
+    List<ZKAuthInfo> ret = Lists.newArrayList();
+    if (authString == null) {
+      return ret;
+    }
+    
+    List<String> authComps = Lists.newArrayList(
+        Splitter.on(',').omitEmptyStrings().trimResults()
+        .split(authString));
+    
+    for (String comp : authComps) {
+      String parts[] = comp.split(":", 2);
+      if (parts.length != 2) {
+        throw new BadAuthFormatException(
+            "Auth '" + comp + "' not of expected form scheme:auth");
+      }
+      ret.add(new ZKAuthInfo(parts[0],
+          parts[1].getBytes(Charsets.UTF_8)));
+    }
+    return ret;
+  }
+  
+  /**
+   * Because ZK ACLs and authentication information may be secret,
+   * allow the configuration values to be indirected through a file
+   * by specifying the configuration as "@/path/to/file". If this
+   * syntax is used, this function will return the contents of the file
+   * as a String.
+   * 
+   * @param valInConf the value from the Configuration 
+   * @return either the same value, or the contents of the referenced
+   * file if the configured value starts with "@"
+   * @throws IOException if the file cannot be read
+   */
+  public static String resolveConfIndirection(String valInConf)
+      throws IOException {
+    if (valInConf == null) return null;
+    if (!valInConf.startsWith("@")) {
+      return valInConf;
+    }
+    String path = valInConf.substring(1).trim();
+    return Files.toString(new File(path), Charsets.UTF_8).trim();
+  }
+
+  /**
+   * An authentication token passed to ZooKeeper.addAuthInfo
+   */
+  static class ZKAuthInfo {
+    private final String scheme;
+    private final byte[] auth;
+    
+    public ZKAuthInfo(String scheme, byte[] auth) {
+      super();
+      this.scheme = scheme;
+      this.auth = auth;
+    }
+
+    String getScheme() {
+      return scheme;
+    }
+
+    byte[] getAuth() {
+      return auth;
+    }
+  }
+
+  static class BadAclFormatException extends HadoopIllegalArgumentException {
+    private static final long serialVersionUID = 1L;
+
+    public BadAclFormatException(String message) {
+      super(message);
+    }
+  }
+  
+  static class BadAuthFormatException extends HadoopIllegalArgumentException {
+    private static final long serialVersionUID = 1L;
+
+    public BadAuthFormatException(String message) {
+      super(message);
+    }
+  }
+
+}
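A usage sketch for the new utility; it is placed in the org.apache.hadoop.ha package because ZKAuthInfo is package-private, and the file path passed to resolveConfIndirection is hypothetical.

package org.apache.hadoop.ha;

import java.io.IOException;
import java.util.List;

import org.apache.zookeeper.data.ACL;

public class HAZKUtilUsageSketch {
  public static void main(String[] args) throws IOException {
    // scheme:id:perm entries, comma-separated.
    List<ACL> acls = HAZKUtil.parseACLs("sasl:zkfc@MY.DOMAIN:rwcda");
    System.out.println("parsed " + acls.size() + " ACL entry/entries");

    // addAuth-style scheme:auth entries. ZKAuthInfo is package-private,
    // which is why this sketch lives in org.apache.hadoop.ha.
    List<HAZKUtil.ZKAuthInfo> auths = HAZKUtil.parseAuth("digest:zkfc:secret");
    System.out.println("parsed " + auths.size() + " auth entry/entries");

    // A leading '@' means "read the real value from this file", so secrets
    // can stay out of *-site.xml. (Hypothetical path.)
    System.out.println(
        HAZKUtil.resolveConfIndirection("@/etc/hadoop/zk-auth.txt"));
  }
}
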
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
index 7533529..a349626 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
@@ -22,6 +22,7 @@
 import java.util.LinkedList;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -43,7 +44,8 @@
  * Classes which need callbacks should implement the {@link Callback}
  * interface.
  */
-class HealthMonitor {
+@InterfaceAudience.Private
+public class HealthMonitor {
   private static final Log LOG = LogFactory.getLog(
       HealthMonitor.class);
 
@@ -75,7 +77,8 @@
   private HAServiceStatus lastServiceState = new HAServiceStatus(
       HAServiceState.INITIALIZING);
   
-  enum State {
+  @InterfaceAudience.Private
+  public enum State {
     /**
      * The health monitor is still starting up.
      */
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCProtocol.java
new file mode 100644
index 0000000..02342f4
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCProtocol.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.retry.Idempotent;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.KerberosInfo;
+
+import java.io.IOException;
+
+/**
+ * Protocol exposed by the ZKFailoverController, allowing for graceful
+ * failover.
+ */
+@KerberosInfo(
+    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface ZKFCProtocol {
+  /**
+   * Initial version of the protocol
+   */
+  public static final long versionID = 1L;
+
+  /**
+   * Request that this service yield from the active node election for the
+   * specified time period.
+   * 
+   * If the node is not currently active, it simply prevents any attempts
+   * to become active for the specified time period. Otherwise, it first
+   * tries to transition the local service to standby state, and then quits
+   * the election.
+   * 
+   * If the attempt to transition to standby succeeds, then the ZKFC receiving
+   * this RPC will delete its own breadcrumb node in ZooKeeper. Thus, the
+   * next node to become active will not run any fencing process. Otherwise,
+   * the breadcrumb will be left, such that the next active will fence this
+   * node.
+   * 
+   * After the specified time period elapses, the node will attempt to re-join
+   * the election, provided that its service is healthy.
+   * 
+   * If the node has previously been instructed to cede active, and is still
+   * within the specified time period, the later command's time period will
+   * take precedence, resetting the timer.
+   * 
+   * A call to cedeActive which specifies a 0 or negative time period will
+   * allow the target node to immediately rejoin the election, so long as
+   * it is healthy.
+   *  
+   * @param millisToCede period for which the node should not attempt to
+   * become active
+   * @throws IOException if the operation fails
+   * @throws AccessControlException if the operation is disallowed
+   */
+  @Idempotent
+  public void cedeActive(int millisToCede)
+      throws IOException, AccessControlException;
+  
+  /**
+   * Request that this node try to become active through a graceful failover.
+   * 
+   * If the node is already active, this is a no-op and simply returns success
+   * without taking any further action.
+   * 
+   * If the node is not healthy, it will throw an exception indicating that it
+   * is not able to become active.
+   * 
+   * If the node is healthy and not active, it will try to initiate a graceful
+   * failover to become active, returning only when it has successfully become
+   * active. See {@link ZKFailoverController#gracefulFailoverToYou()} for the
+   * implementation details.
+   * 
+   * If the node fails to successfully coordinate the failover, throws an
+   * exception indicating the reason for failure.
+   * 
+   * @throws IOException if graceful failover fails
+   * @throws AccessControlException if the operation is disallowed
+   */
+  @Idempotent
+  public void gracefulFailover()
+      throws IOException, AccessControlException;
+}
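A client-side sketch of the new protocol, using the getZKFCProxy() helper added to HAServiceTarget in this patch; the timeout and cede period are illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.ZKFCProtocol;

public class ZKFCProtocolClientSketch {
  /**
   * Illustration only: keep the target's node out of the election for ten
   * seconds, then let it rejoin immediately by ceding with a non-positive
   * period. Both calls go through the target's ZKFC.
   */
  static void bounceElection(HAServiceTarget target, Configuration conf)
      throws IOException {
    ZKFCProtocol zkfc = target.getZKFCProxy(conf, 5000 /* rpc timeout, ms */);
    zkfc.cedeActive(10000); // yield from the election for 10 seconds
    zkfc.cedeActive(-1);    // rejoin right away, provided the service is healthy
  }
}
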
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java
new file mode 100644
index 0000000..2077a86
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService;
+import org.apache.hadoop.ha.protocolPB.ZKFCProtocolPB;
+import org.apache.hadoop.ha.protocolPB.ZKFCProtocolServerSideTranslatorPB;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RPC.Server;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+
+import com.google.protobuf.BlockingService;
+
+@InterfaceAudience.LimitedPrivate("HDFS")
+@InterfaceStability.Evolving
+public class ZKFCRpcServer implements ZKFCProtocol {
+
+  private static final int HANDLER_COUNT = 3;
+  private final ZKFailoverController zkfc;
+  private Server server;
+
+  ZKFCRpcServer(Configuration conf,
+      InetSocketAddress bindAddr,
+      ZKFailoverController zkfc,
+      PolicyProvider policy) throws IOException {
+    this.zkfc = zkfc;
+    
+    RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
+        ProtobufRpcEngine.class);
+    ZKFCProtocolServerSideTranslatorPB translator =
+        new ZKFCProtocolServerSideTranslatorPB(this);
+    BlockingService service = ZKFCProtocolService
+        .newReflectiveBlockingService(translator);
+    this.server = RPC.getServer(
+        ZKFCProtocolPB.class,
+        service, bindAddr.getHostName(),
+            bindAddr.getPort(), HANDLER_COUNT, false, conf,
+            null /*secretManager*/);
+    
+    // set service-level authorization security policy
+    if (conf.getBoolean(
+        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
+      server.refreshServiceAcl(conf, policy);
+    }
+
+  }
+  
+  void start() {
+    this.server.start();
+  }
+
+  public InetSocketAddress getAddress() {
+    return server.getListenerAddress();
+  }
+
+  void stopAndJoin() throws InterruptedException {
+    this.server.stop();
+    this.server.join();
+  }
+  
+  @Override
+  public void cedeActive(int millisToCede) throws IOException,
+      AccessControlException {
+    zkfc.checkRpcAdminAccess();
+    zkfc.cedeActive(millisToCede);
+  }
+
+  @Override
+  public void gracefulFailover() throws IOException, AccessControlException {
+    zkfc.checkRpcAdminAccess();
+    zkfc.gracefulFailoverToYou();
+  }
+
+}
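ZKFCRpcServer only refreshes its service-level ACLs when hadoop.security.authorization is enabled. A minimal sketch of setting that flag programmatically; deployments would normally set it in core-site.xml instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class ZkfcRpcAuthzSketch {
  /**
   * Illustration only: with this flag set, the ZKFCRpcServer constructor
   * above calls refreshServiceAcl() with the PolicyProvider supplied by
   * the concrete ZKFC implementation.
   */
  static Configuration enableServiceAuthorization(Configuration conf) {
    conf.setBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    return conf;
  }
}
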
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 9a50fe6..c02fe0d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -18,79 +18,143 @@
 package org.apache.hadoop.ha;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.security.PrivilegedAction;
+import java.security.PrivilegedExceptionAction;
+import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
 import org.apache.hadoop.ha.HealthMonitor.State;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.hadoop.util.ToolRunner;
 import org.apache.zookeeper.data.ACL;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 @InterfaceAudience.LimitedPrivate("HDFS")
-public abstract class ZKFailoverController implements Tool {
+public abstract class ZKFailoverController {
 
   static final Log LOG = LogFactory.getLog(ZKFailoverController.class);
   
-  // TODO: this should be namespace-scoped
   public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
   private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms";
   private static final int ZK_SESSION_TIMEOUT_DEFAULT = 5*1000;
   private static final String ZK_PARENT_ZNODE_KEY = "ha.zookeeper.parent-znode";
+  public static final String ZK_ACL_KEY = "ha.zookeeper.acl";
+  private static final String ZK_ACL_DEFAULT = "world:anyone:rwcda";
+  public static final String ZK_AUTH_KEY = "ha.zookeeper.auth";
   static final String ZK_PARENT_ZNODE_DEFAULT = "/hadoop-ha";
 
+  /**
+   * All of the conf keys used by the ZKFC. This is used in order to allow
+   * them to be overridden on a per-nameservice or per-namenode basis.
+   */
+  protected static final String[] ZKFC_CONF_KEYS = new String[] {
+    ZK_QUORUM_KEY,
+    ZK_SESSION_TIMEOUT_KEY,
+    ZK_PARENT_ZNODE_KEY,
+    ZK_ACL_KEY,
+    ZK_AUTH_KEY
+  };
+  
+
   /** Unable to format the parent znode in ZK */
   static final int ERR_CODE_FORMAT_DENIED = 2;
   /** The parent znode doesn't exist in ZK */
   static final int ERR_CODE_NO_PARENT_ZNODE = 3;
   /** Fencing is not properly configured */
   static final int ERR_CODE_NO_FENCER = 4;
+  /** Automatic failover is not enabled */
+  static final int ERR_CODE_AUTO_FAILOVER_NOT_ENABLED = 5;
+  /** Cannot connect to ZooKeeper */
+  static final int ERR_CODE_NO_ZK = 6;
   
-  private Configuration conf;
+  protected Configuration conf;
+  private String zkQuorum;
+  protected final HAServiceTarget localTarget;
 
   private HealthMonitor healthMonitor;
   private ActiveStandbyElector elector;
-
-  private HAServiceTarget localTarget;
-
-  private String parentZnode;
+  protected ZKFCRpcServer rpcServer;
 
   private State lastHealthState = State.INITIALIZING;
 
   /** Set if a fatal error occurs */
   private String fatalError = null;
 
-  @Override
-  public void setConf(Configuration conf) {
+  /**
+   * A future nanotime before which the ZKFC will not join the election.
+   * This is used during graceful failover.
+   */
+  private long delayJoiningUntilNanotime = 0;
+
+  /** Executor on which {@link #scheduleRecheck(long)} schedules events */
+  private ScheduledExecutorService delayExecutor =
+    Executors.newScheduledThreadPool(1,
+        new ThreadFactoryBuilder().setDaemon(true)
+            .setNameFormat("ZKFC Delay timer #%d")
+            .build());
+
+  private ActiveAttemptRecord lastActiveAttemptRecord;
+  private Object activeAttemptRecordLock = new Object();
+
+  protected ZKFailoverController(Configuration conf, HAServiceTarget localTarget) {
+    this.localTarget = localTarget;
     this.conf = conf;
-    localTarget = getLocalTarget();
   }
   
 
   protected abstract byte[] targetToData(HAServiceTarget target);
-  protected abstract HAServiceTarget getLocalTarget();  
   protected abstract HAServiceTarget dataToTarget(byte[] data);
+  protected abstract void loginAsFCUser() throws IOException;
+  protected abstract void checkRpcAdminAccess()
+      throws AccessControlException, IOException;
+  protected abstract InetSocketAddress getRpcAddressToBindTo();
+  protected abstract PolicyProvider getPolicyProvider();
 
+  /**
+   * Return the name of a znode inside the configured parent znode in which
+   * the ZKFC will do all of its work. This is so that multiple federated
+   * nameservices can run on the same ZK quorum without having to manually
+   * configure them to use separate subdirectories.
+   */
+  protected abstract String getScopeInsideParentNode();
 
-  @Override
-  public Configuration getConf() {
-    return conf;
+  public HAServiceTarget getLocalTarget() {
+    return localTarget;
   }
-
-  @Override
+  
   public int run(final String[] args) throws Exception {
-    // TODO: need to hook DFS here to find the NN keytab info, etc,
-    // similar to what DFSHAAdmin does. Annoying that this is in common.
+    if (!localTarget.isAutoFailoverEnabled()) {
+      LOG.fatal("Automatic failover is not enabled for " + localTarget + "." +
+          " Please ensure that automatic failover is enabled in the " +
+          "configuration before running the ZK failover controller.");
+      return ERR_CODE_AUTO_FAILOVER_NOT_ENABLED;
+    }
+    loginAsFCUser();
     try {
       return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
         @Override
@@ -99,6 +163,10 @@
             return doRun(args);
           } catch (Exception t) {
             throw new RuntimeException(t);
+          } finally {
+            if (elector != null) {
+              elector.terminateConnection();
+            }
           }
         }
       });
@@ -107,6 +175,7 @@
     }
   }
   
+
   private int doRun(String[] args)
       throws HadoopIllegalArgumentException, IOException, InterruptedException {
     initZK();
@@ -129,11 +198,23 @@
       }
     }
     
-    if (!elector.parentZNodeExists()) {
-      LOG.fatal("Unable to start failover controller. " +
-          "Parent znode does not exist.\n" +
-          "Run with -formatZK flag to initialize ZooKeeper.");
-      return ERR_CODE_NO_PARENT_ZNODE;
+    try {
+      if (!elector.parentZNodeExists()) {
+        LOG.fatal("Unable to start failover controller. " +
+            "Parent znode does not exist.\n" +
+            "Run with -formatZK flag to initialize ZooKeeper.");
+        return ERR_CODE_NO_PARENT_ZNODE;
+      }
+    } catch (IOException ioe) {
+      if (ioe.getCause() instanceof KeeperException.ConnectionLossException) {
+        LOG.fatal("Unable to start failover controller. Unable to connect " +
+            "to ZooKeeper quorum at " + zkQuorum + ". Please check the " +
+            "configured value for " + ZK_QUORUM_KEY + " and ensure that " +
+            "ZooKeeper is running.");
+        return ERR_CODE_NO_ZK;
+      } else {
+        throw ioe;
+      }
     }
 
     try {
@@ -145,8 +226,18 @@
       return ERR_CODE_NO_FENCER;
     }
 
+    initRPC();
     initHM();
-    mainLoop();
+    startRPC();
+    try {
+      mainLoop();
+    } finally {
+      rpcServer.stopAndJoin();
+      
+      elector.quitElection(true);
+      healthMonitor.shutdown();
+      healthMonitor.join();
+    }
     return 0;
   }
 
@@ -181,6 +272,7 @@
   }
 
   private boolean confirmFormat() {
+    String parentZnode = getParentZnode();
     System.err.println(
         "===============================================\n" +
         "The configured parent znode " + parentZnode + " already exists.\n" +
@@ -206,16 +298,40 @@
     healthMonitor.addCallback(new HealthCallbacks());
     healthMonitor.start();
   }
+  
+  protected void initRPC() throws IOException {
+    InetSocketAddress bindAddr = getRpcAddressToBindTo();
+    rpcServer = new ZKFCRpcServer(conf, bindAddr, this, getPolicyProvider());
+  }
+
+  protected void startRPC() throws IOException {
+    rpcServer.start();
+  }
+
 
   private void initZK() throws HadoopIllegalArgumentException, IOException {
-    String zkQuorum = conf.get(ZK_QUORUM_KEY);
+    zkQuorum = conf.get(ZK_QUORUM_KEY);
     int zkTimeout = conf.getInt(ZK_SESSION_TIMEOUT_KEY,
         ZK_SESSION_TIMEOUT_DEFAULT);
-    parentZnode = conf.get(ZK_PARENT_ZNODE_KEY,
-        ZK_PARENT_ZNODE_DEFAULT);
-    // TODO: need ZK ACL support in config, also maybe auth!
-    List<ACL> zkAcls = Ids.OPEN_ACL_UNSAFE;
+    // Parse ACLs from configuration.
+    String zkAclConf = conf.get(ZK_ACL_KEY, ZK_ACL_DEFAULT);
+    zkAclConf = HAZKUtil.resolveConfIndirection(zkAclConf);
+    List<ACL> zkAcls = HAZKUtil.parseACLs(zkAclConf);
+    if (zkAcls.isEmpty()) {
+      zkAcls = Ids.CREATOR_ALL_ACL;
+    }
+    
+    // Parse authentication from configuration.
+    String zkAuthConf = conf.get(ZK_AUTH_KEY);
+    zkAuthConf = HAZKUtil.resolveConfIndirection(zkAuthConf);
+    List<ZKAuthInfo> zkAuths;
+    if (zkAuthConf != null) {
+      zkAuths = HAZKUtil.parseAuth(zkAuthConf);
+    } else {
+      zkAuths = Collections.emptyList();
+    }
 
+    // Sanity check configuration.
     Preconditions.checkArgument(zkQuorum != null,
         "Missing required configuration '%s' for ZooKeeper quorum",
         ZK_QUORUM_KEY);
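initZK() now reads the quorum, ACL and auth settings from configuration, resolving '@/path' indirection through HAZKUtil. A sketch of setting those keys programmatically; the key strings match the constants defined above, while the quorum hosts and file path are hypothetical.

import org.apache.hadoop.conf.Configuration;

public class ZkfcZkConfigSketch {
  static Configuration configureZk(Configuration conf) {
    // Required: the ZooKeeper quorum the ZKFC connects to.
    conf.set("ha.zookeeper.quorum", "zk1:2181,zk2:2181,zk3:2181");
    // Optional: ACLs applied to znodes the ZKFC creates
    // (default is world:anyone:rwcda).
    conf.set("ha.zookeeper.acl", "sasl:zkfc@MY.DOMAIN:rwcda");
    // Optional: authentications added to the ZK connection; a leading '@'
    // means "read the value from this file" so secrets stay out of *-site.xml.
    conf.set("ha.zookeeper.auth", "@/etc/hadoop/zk-auth.txt");
    return conf;
  }
}
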
@@ -224,9 +340,19 @@
     
 
     elector = new ActiveStandbyElector(zkQuorum,
-        zkTimeout, parentZnode, zkAcls, new ElectorCallbacks());
+        zkTimeout, getParentZnode(), zkAcls, zkAuths,
+        new ElectorCallbacks());
   }
   
+  private String getParentZnode() {
+    String znode = conf.get(ZK_PARENT_ZNODE_KEY,
+        ZK_PARENT_ZNODE_DEFAULT);
+    if (!znode.endsWith("/")) {
+      znode += "/";
+    }
+    return znode + getScopeInsideParentNode();
+  }
+
   private synchronized void mainLoop() throws InterruptedException {
     while (fatalError == null) {
       wait();
@@ -242,16 +368,30 @@
     notifyAll();
   }
   
-  private synchronized void becomeActive() {
+  private synchronized void becomeActive() throws ServiceFailedException {
     LOG.info("Trying to make " + localTarget + " active...");
     try {
       HAServiceProtocolHelper.transitionToActive(localTarget.getProxy(
-          conf, FailoverController.getRpcTimeoutToNewActive(conf)));
-      LOG.info("Successfully transitioned " + localTarget +
-          " to active state");
+          conf, FailoverController.getRpcTimeoutToNewActive(conf)),
+          createReqInfo());
+      String msg = "Successfully transitioned " + localTarget +
+          " to active state";
+      LOG.info(msg);
+      recordActiveAttempt(new ActiveAttemptRecord(true, msg));
+
     } catch (Throwable t) {
-      LOG.fatal("Couldn't make " + localTarget + " active", t);
-      elector.quitElection(true);
+      String msg = "Couldn't make " + localTarget + " active";
+      LOG.fatal(msg, t);
+      
+      recordActiveAttempt(new ActiveAttemptRecord(false, msg + "\n" +
+          StringUtils.stringifyException(t)));
+
+      if (t instanceof ServiceFailedException) {
+        throw (ServiceFailedException)t;
+      } else {
+        throw new ServiceFailedException("Couldn't transition to active",
+            t);
+      }
 /*
 * TODO:
 * we need to make sure that if we get fenced and then quickly restarted,
@@ -264,12 +404,79 @@
     }
   }
 
+  /**
+   * Store the results of the last attempt to become active.
+   * This is used so that, during manually initiated failover,
+   * we can report back the results of the attempt to become active
+   * to the initiator of the failover.
+   */
+  private void recordActiveAttempt(
+      ActiveAttemptRecord record) {
+    synchronized (activeAttemptRecordLock) {
+      lastActiveAttemptRecord = record;
+      activeAttemptRecordLock.notifyAll();
+    }
+  }
+
+  /**
+   * Wait until one of the following events:
+   * <ul>
+   * <li>Another thread publishes the results of an attempt to become active
+   * using {@link #recordActiveAttempt(ActiveAttemptRecord)}</li>
+   * <li>The node enters bad health status</li>
+   * <li>The specified timeout elapses</li>
+   * </ul>
+   * 
+   * @param timeoutMillis number of millis to wait
+   * @return the published record, or null if the timeout elapses or the
+   * service becomes unhealthy 
+   * @throws InterruptedException if the thread is interrupted.
+   */
+  private ActiveAttemptRecord waitForActiveAttempt(int timeoutMillis)
+      throws InterruptedException {
+    long st = System.nanoTime();
+    long waitUntil = st + TimeUnit.NANOSECONDS.convert(
+        timeoutMillis, TimeUnit.MILLISECONDS);
+    
+    do {
+      // periodically check health state, because entering an
+      // unhealthy state could prevent us from ever attempting to
+      // become active. We can detect this and respond to the user
+      // immediately.
+      synchronized (this) {
+        if (lastHealthState != State.SERVICE_HEALTHY) {
+          // early out if service became unhealthy
+          return null;
+        }
+      }
+
+      synchronized (activeAttemptRecordLock) {
+        if ((lastActiveAttemptRecord != null &&
+            lastActiveAttemptRecord.nanoTime >= st)) {
+          return lastActiveAttemptRecord;
+        }
+        // Only wait 1sec so that we periodically recheck the health state
+        // above.
+        activeAttemptRecordLock.wait(1000);
+      }
+    } while (System.nanoTime() < waitUntil);
+    
+    // Timeout elapsed.
+    LOG.warn(timeoutMillis + "ms timeout elapsed waiting for an attempt " +
+        "to become active");
+    return null;
+  }
+
+  private StateChangeRequestInfo createReqInfo() {
+    return new StateChangeRequestInfo(RequestSource.REQUEST_BY_ZKFC);
+  }
+
   private synchronized void becomeStandby() {
     LOG.info("ZK Election indicated that " + localTarget +
         " should become standby");
     try {
       int timeout = FailoverController.getGracefulFenceTimeout(conf);
-      localTarget.getProxy(conf, timeout).transitionToStandby();
+      localTarget.getProxy(conf, timeout).transitionToStandby(createReqInfo());
       LOG.info("Successfully transitioned " + localTarget +
           " to standby state");
     } catch (Exception e) {
@@ -279,27 +486,336 @@
       // at the same time.
     }
   }
+  
+
+  private synchronized void fenceOldActive(byte[] data) {
+    HAServiceTarget target = dataToTarget(data);
+    
+    try {
+      doFence(target);
+    } catch (Throwable t) {
+      recordActiveAttempt(new ActiveAttemptRecord(false, "Unable to fence old active: " + StringUtils.stringifyException(t)));
+      Throwables.propagate(t);
+    }
+  }
+  
+  private void doFence(HAServiceTarget target) {
+    LOG.info("Should fence: " + target);
+    boolean gracefulWorked = new FailoverController(conf,
+        RequestSource.REQUEST_BY_ZKFC).tryGracefulFence(target);
+    if (gracefulWorked) {
+      // It's possible that it's in standby but just about to go into active,
+      // no? Is there some race here?
+      LOG.info("Successfully transitioned " + target + " to standby " +
+          "state without fencing");
+      return;
+    }
+    
+    try {
+      target.checkFencingConfigured();
+    } catch (BadFencingConfigurationException e) {
+      LOG.error("Couldn't fence old active " + target, e);
+      recordActiveAttempt(new ActiveAttemptRecord(false, "Unable to fence old active"));
+      throw new RuntimeException(e);
+    }
+    
+    if (!target.getFencer().fence(target)) {
+      throw new RuntimeException("Unable to fence " + target);
+    }
+  }
+
+
+  /**
+   * Request, issued during a graceful failover, that this ZKFC cede the
+   * active role. Causes this ZKFC to transition its local node to standby,
+   * then quit the election for the specified period of time, after which
+   * it will rejoin the election iff it is healthy.
+   */
+  void cedeActive(final int millisToCede)
+      throws AccessControlException, ServiceFailedException, IOException {
+    try {
+      UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          doCedeActive(millisToCede);
+          return null;
+        }
+      });
+    } catch (InterruptedException e) {
+      throw new IOException(e);
+    }
+  }
+  
+  private void doCedeActive(int millisToCede) 
+      throws AccessControlException, ServiceFailedException, IOException {
+    int timeout = FailoverController.getGracefulFenceTimeout(conf);
+
+    // Lock elector to maintain lock ordering of elector -> ZKFC
+    synchronized (elector) {
+      synchronized (this) {
+        if (millisToCede <= 0) {
+          delayJoiningUntilNanotime = 0;
+          recheckElectability();
+          return;
+        }
+  
+        LOG.info("Requested by " + UserGroupInformation.getCurrentUser() +
+            " at " + Server.getRemoteAddress() + " to cede active role.");
+        boolean needFence = false;
+        try {
+          localTarget.getProxy(conf, timeout).transitionToStandby(createReqInfo());
+          LOG.info("Successfully ensured local node is in standby mode");
+        } catch (IOException ioe) {
+          LOG.warn("Unable to transition local node to standby: " +
+              ioe.getLocalizedMessage());
+          LOG.warn("Quitting election but indicating that fencing is " +
+              "necessary");
+          needFence = true;
+        }
+        delayJoiningUntilNanotime = System.nanoTime() +
+            TimeUnit.MILLISECONDS.toNanos(millisToCede);
+        elector.quitElection(needFence);
+      }
+    }
+    recheckElectability();
+  }
+  
+  /**
+   * Coordinate a graceful failover to this node.
+   * @throws ServiceFailedException if the node fails to become active
+   * @throws IOException some other error occurs
+   */
+  void gracefulFailoverToYou() throws ServiceFailedException, IOException {
+    try {
+      UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          doGracefulFailover();
+          return null;
+        }
+        
+      });
+    } catch (InterruptedException e) {
+      throw new IOException(e);
+    }
+  }
+
+  /**
+   * Coordinate a graceful failover. This proceeds in several phases:
+   * 1) Pre-flight checks: ensure that the local node is healthy, and
+   * thus a candidate for failover.
+   * 2) Determine the current active node. If it is the local node, no
+   * need to failover - return success.
+   * 3) Ask that node to yield from the election for a number of seconds.
+   * 4) Allow the normal election path to run in other threads. Wait until
+   * we either become unhealthy or we see an election attempt recorded by
+   * the normal code path.
+   * 5) Allow the old active to rejoin the election, so a future
+   * failback is possible.
+   */
+  private void doGracefulFailover()
+      throws ServiceFailedException, IOException, InterruptedException {
+    int timeout = FailoverController.getGracefulFenceTimeout(conf) * 2;
+    
+    // Phase 1: pre-flight checks
+    checkEligibleForFailover();
+    
+    // Phase 2: determine old/current active node. Check that we're not
+    // ourselves active, etc.
+    HAServiceTarget oldActive = getCurrentActive();
+    if (oldActive == null) {
+      // No node is currently active. So, if we aren't already
+      // active ourselves by means of a normal election, then there's
+      // probably something preventing us from becoming active.
+      throw new ServiceFailedException(
+          "No other node is currently active.");
+    }
+    
+    if (oldActive.getAddress().equals(localTarget.getAddress())) {
+      LOG.info("Local node " + localTarget + " is already active. " +
+          "No need to failover. Returning success.");
+      return;
+    }
+    
+    // Phase 3: ask the old active to yield from the election.
+    LOG.info("Asking " + oldActive + " to cede its active state for " +
+        timeout + "ms");
+    ZKFCProtocol oldZkfc = oldActive.getZKFCProxy(conf, timeout);
+    oldZkfc.cedeActive(timeout);
+
+    // Phase 4: wait for the normal election to make the local node
+    // active.
+    ActiveAttemptRecord attempt = waitForActiveAttempt(timeout + 60000);
+    
+    if (attempt == null) {
+      // We didn't even make an attempt to become active.
+      synchronized(this) {
+        if (lastHealthState != State.SERVICE_HEALTHY) {
+          throw new ServiceFailedException("Unable to become active. " +
+            "Service became unhealthy while trying to failover.");          
+        }
+      }
+      
+      throw new ServiceFailedException("Unable to become active. " +
+          "Local node did not get an opportunity to do so from ZooKeeper, " +
+          "or the local node took too long to transition to active.");
+    }
+
+    // Phase 5. At this point, we made some attempt to become active. So we
+    // can tell the old active to rejoin if it wants. This allows a quick
+    // fail-back if we immediately crash.
+    oldZkfc.cedeActive(-1);
+    
+    if (attempt.succeeded) {
+      LOG.info("Successfully became active. " + attempt.status);
+    } else {
+      // Propagate failure
+      String msg = "Failed to become active. " + attempt.status;
+      throw new ServiceFailedException(msg);
+    }
+  }
+
+  /**
+   * Ensure that the local node is in a healthy state, and thus
+   * eligible for graceful failover.
+   * @throws ServiceFailedException if the node is unhealthy
+   */
+  private synchronized void checkEligibleForFailover()
+      throws ServiceFailedException {
+    // Check health
+    if (this.getLastHealthState() != State.SERVICE_HEALTHY) {
+      throw new ServiceFailedException(
+          localTarget + " is not currently healthy. " +
+          "Cannot be failover target");
+    }
+  }
+
+  /**
+   * @return an {@link HAServiceTarget} for the current active node
+   * in the cluster, or null if no node is active.
+   * @throws IOException if a ZK-related issue occurs
+   * @throws InterruptedException if the thread is interrupted
+   */
+  private HAServiceTarget getCurrentActive()
+      throws IOException, InterruptedException {
+    synchronized (elector) {
+      synchronized (this) {
+        byte[] activeData;
+        try {
+          activeData = elector.getActiveData();
+        } catch (ActiveNotFoundException e) {
+          return null;
+        } catch (KeeperException ke) {
+          throw new IOException(
+              "Unexpected ZooKeeper issue fetching active node info", ke);
+        }
+        
+        HAServiceTarget oldActive = dataToTarget(activeData);
+        return oldActive;
+      }
+    }
+  }
+
+  /**
+   * Check the current state of the service, and join the election
+   * if it should be in the election.
+   */
+  private void recheckElectability() {
+    // Maintain lock ordering of elector -> ZKFC
+    synchronized (elector) {
+      synchronized (this) {
+        boolean healthy = lastHealthState == State.SERVICE_HEALTHY;
+    
+        long remainingDelay = delayJoiningUntilNanotime - System.nanoTime(); 
+        if (remainingDelay > 0) {
+          if (healthy) {
+            LOG.info("Would have joined master election, but this node is " +
+                "prohibited from doing so for " +
+                TimeUnit.NANOSECONDS.toMillis(remainingDelay) + " more ms");
+          }
+          scheduleRecheck(remainingDelay);
+          return;
+        }
+    
+        switch (lastHealthState) {
+        case SERVICE_HEALTHY:
+          elector.joinElection(targetToData(localTarget));
+          break;
+          
+        case INITIALIZING:
+          LOG.info("Ensuring that " + localTarget + " does not " +
+              "participate in active master election");
+          elector.quitElection(false);
+          break;
+    
+        case SERVICE_UNHEALTHY:
+        case SERVICE_NOT_RESPONDING:
+          LOG.info("Quitting master election for " + localTarget +
+              " and marking that fencing is necessary");
+          elector.quitElection(true);
+          break;
+          
+        case HEALTH_MONITOR_FAILED:
+          fatalError("Health monitor failed!");
+          break;
+          
+        default:
+          throw new IllegalArgumentException("Unhandled state:" + lastHealthState);
+        }
+      }
+    }
+  }
+  
+  /**
+   * Schedule a call to {@link #recheckElectability()} in the future.
+   */
+  private void scheduleRecheck(long whenNanos) {
+    delayExecutor.schedule(
+        new Runnable() {
+          @Override
+          public void run() {
+            try {
+              recheckElectability();
+            } catch (Throwable t) {
+              fatalError("Failed to recheck electability: " +
+                  StringUtils.stringifyException(t));
+            }
+          }
+        },
+        whenNanos, TimeUnit.NANOSECONDS);
+  }
 
   /**
    * @return the last health state passed to the FC
    * by the HealthMonitor.
    */
   @VisibleForTesting
-  State getLastHealthState() {
+  synchronized State getLastHealthState() {
     return lastHealthState;
   }
+
+  private synchronized void setLastHealthState(HealthMonitor.State newState) {
+    LOG.info("Local service " + localTarget +
+        " entered state: " + newState);
+    lastHealthState = newState;
+  }
   
   @VisibleForTesting
   ActiveStandbyElector getElectorForTests() {
     return elector;
   }
+  
+  @VisibleForTesting
+  ZKFCRpcServer getRpcServerForTests() {
+    return rpcServer;
+  }
 
   /**
    * Callbacks from elector
    */
   class ElectorCallbacks implements ActiveStandbyElectorCallback {
     @Override
-    public void becomeActive() {
+    public void becomeActive() throws ServiceFailedException {
       ZKFailoverController.this.becomeActive();
     }
 
@@ -319,31 +835,13 @@
 
     @Override
     public void fenceOldActive(byte[] data) {
-      HAServiceTarget target = dataToTarget(data);
-      
-      LOG.info("Should fence: " + target);
-      boolean gracefulWorked = new FailoverController(conf)
-          .tryGracefulFence(target);
-      if (gracefulWorked) {
-        // It's possible that it's in standby but just about to go into active,
-        // no? Is there some race here?
-        LOG.info("Successfully transitioned " + target + " to standby " +
-            "state without fencing");
-        return;
-      }
-      
-      try {
-        target.checkFencingConfigured();
-      } catch (BadFencingConfigurationException e) {
-        LOG.error("Couldn't fence old active " + target, e);
-        // TODO: see below todo
-        throw new RuntimeException(e);
-      }
-      
-      if (!target.getFencer().fence(target)) {
-        // TODO: this will end up in some kind of tight loop,
-        // won't it? We need some kind of backoff
-        throw new RuntimeException("Unable to fence " + target);
+      ZKFailoverController.this.fenceOldActive(data);
+    }
+    
+    @Override
+    public String toString() {
+      synchronized (ZKFailoverController.this) {
+        return "Elector callbacks for " + localTarget;
       }
     }
   }
@@ -354,36 +852,21 @@
   class HealthCallbacks implements HealthMonitor.Callback {
     @Override
     public void enteredState(HealthMonitor.State newState) {
-      LOG.info("Local service " + localTarget +
-          " entered state: " + newState);
-      switch (newState) {
-      case SERVICE_HEALTHY:
-        LOG.info("Joining master election for " + localTarget);
-        elector.joinElection(targetToData(localTarget));
-        break;
-        
-      case INITIALIZING:
-        LOG.info("Ensuring that " + localTarget + " does not " +
-            "participate in active master election");
-        elector.quitElection(false);
-        break;
-
-      case SERVICE_UNHEALTHY:
-      case SERVICE_NOT_RESPONDING:
-        LOG.info("Quitting master election for " + localTarget +
-            " and marking that fencing is necessary");
-        elector.quitElection(true);
-        break;
-        
-      case HEALTH_MONITOR_FAILED:
-        fatalError("Health monitor failed!");
-        break;
-        
-      default:
-        throw new IllegalArgumentException("Unhandled state:" + newState);
-      }
-      
-      lastHealthState = newState;
+      setLastHealthState(newState);
+      recheckElectability();
     }
   }
+  
+  private static class ActiveAttemptRecord {
+    private final boolean succeeded;
+    private final String status;
+    private final long nanoTime;
+    
+    public ActiveAttemptRecord(boolean succeeded, String status) {
+      this.succeeded = succeeded;
+      this.status = status;
+      this.nanoTime = System.nanoTime();
+    }
+  }
+
 }
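
A brief illustrative sketch (not part of the patch) of how the graceful-failover flow added above is driven from the admin side. The HAServiceTarget is assumed to be resolved elsewhere (for example by the haadmin CLI), and only getZKFCProxy()/gracefulFailover() are taken from this change; the class and method names below are hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.HAServiceTarget;
    import org.apache.hadoop.ha.ZKFCProtocol;

    class GracefulFailoverSketch {
      // Ask the ZKFC colocated with 'newActive' to run phases 1-5 of
      // doGracefulFailover() and make its local service active.
      static void failoverTo(HAServiceTarget newActive, Configuration conf)
          throws IOException {
        ZKFCProtocol zkfc = newActive.getZKFCProxy(conf, 60000); // 60s RPC timeout
        zkfc.gracefulFailover();
      }
    }
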
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
index c269bd6..589ccd1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
@@ -30,13 +30,14 @@
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -57,10 +58,6 @@
   private final static RpcController NULL_CONTROLLER = null;
   private final static MonitorHealthRequestProto MONITOR_HEALTH_REQ = 
       MonitorHealthRequestProto.newBuilder().build();
-  private final static TransitionToActiveRequestProto TRANSITION_TO_ACTIVE_REQ = 
-      TransitionToActiveRequestProto.newBuilder().build();
-  private final static TransitionToStandbyRequestProto TRANSITION_TO_STANDBY_REQ = 
-      TransitionToStandbyRequestProto.newBuilder().build();
   private final static GetServiceStatusRequestProto GET_SERVICE_STATUS_REQ = 
       GetServiceStatusRequestProto.newBuilder().build();
   
@@ -94,18 +91,25 @@
   }
 
   @Override
-  public void transitionToActive() throws IOException {
+  public void transitionToActive(StateChangeRequestInfo reqInfo) throws IOException {
     try {
-      rpcProxy.transitionToActive(NULL_CONTROLLER, TRANSITION_TO_ACTIVE_REQ);
+      TransitionToActiveRequestProto req =
+          TransitionToActiveRequestProto.newBuilder()
+            .setReqInfo(convert(reqInfo)).build();
+
+      rpcProxy.transitionToActive(NULL_CONTROLLER, req);
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
   }
 
   @Override
-  public void transitionToStandby() throws IOException {
+  public void transitionToStandby(StateChangeRequestInfo reqInfo) throws IOException {
     try {
-      rpcProxy.transitionToStandby(NULL_CONTROLLER, TRANSITION_TO_STANDBY_REQ);
+      TransitionToStandbyRequestProto req =
+        TransitionToStandbyRequestProto.newBuilder()
+          .setReqInfo(convert(reqInfo)).build();
+      rpcProxy.transitionToStandby(NULL_CONTROLLER, req);
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -143,6 +147,27 @@
     }
   }
   
+  private HAStateChangeRequestInfoProto convert(StateChangeRequestInfo reqInfo) {
+    HARequestSource src;
+    switch (reqInfo.getSource()) {
+    case REQUEST_BY_USER:
+      src = HARequestSource.REQUEST_BY_USER;
+      break;
+    case REQUEST_BY_USER_FORCED:
+      src = HARequestSource.REQUEST_BY_USER_FORCED;
+      break;
+    case REQUEST_BY_ZKFC:
+      src = HARequestSource.REQUEST_BY_ZKFC;
+      break;
+    default:
+      throw new IllegalArgumentException("Bad source: " + reqInfo.getSource());
+    }
+    return HAStateChangeRequestInfoProto.newBuilder()
+        .setReqSource(src)
+        .build();
+  }
+
+
   @Override
   public void close() {
     RPC.stopProxy(rpcProxy);
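
A minimal sketch of what callers now have to pass: every manual state transition carries a StateChangeRequestInfo identifying the request source. The proxy is assumed to be an HAServiceProtocol proxy obtained elsewhere; the sketch class is hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.ha.HAServiceProtocol;
    import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
    import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;

    class ManualTransitionSketch {
      static void makeActive(HAServiceProtocol proxy) throws IOException {
        StateChangeRequestInfo reqInfo =
            new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER);
        // The target can use the recorded source to decide whether a
        // user-initiated transition should be honored.
        proxy.transitionToActive(reqInfo);
      }
    }
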
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
index b5b6a89..63bfbca 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
@@ -19,12 +19,17 @@
 
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto;
@@ -56,6 +61,8 @@
       TransitionToActiveResponseProto.newBuilder().build();
   private static final TransitionToStandbyResponseProto TRANSITION_TO_STANDBY_RESP = 
       TransitionToStandbyResponseProto.newBuilder().build();
+  private static final Log LOG = LogFactory.getLog(
+      HAServiceProtocolServerSideTranslatorPB.class);
   
   public HAServiceProtocolServerSideTranslatorPB(HAServiceProtocol server) {
     this.server = server;
@@ -71,13 +78,33 @@
       throw new ServiceException(e);
     }
   }
+  
+  private StateChangeRequestInfo convert(HAStateChangeRequestInfoProto proto) {
+    RequestSource src;
+    switch (proto.getReqSource()) {
+    case REQUEST_BY_USER:
+      src = RequestSource.REQUEST_BY_USER;
+      break;
+    case REQUEST_BY_USER_FORCED:
+      src = RequestSource.REQUEST_BY_USER_FORCED;
+      break;
+    case REQUEST_BY_ZKFC:
+      src = RequestSource.REQUEST_BY_ZKFC;
+      break;
+    default:
+      LOG.warn("Unknown request source: " + proto.getReqSource());
+      src = null;
+    }
+    
+    return new StateChangeRequestInfo(src);
+  }
 
   @Override
   public TransitionToActiveResponseProto transitionToActive(
       RpcController controller, TransitionToActiveRequestProto request)
       throws ServiceException {
     try {
-      server.transitionToActive();
+      server.transitionToActive(convert(request.getReqInfo()));
       return TRANSITION_TO_ACTIVE_RESP;
     } catch(IOException e) {
       throw new ServiceException(e);
@@ -89,7 +116,7 @@
       RpcController controller, TransitionToStandbyRequestProto request)
       throws ServiceException {
     try {
-      server.transitionToStandby();
+      server.transitionToStandby(convert(request.getReqInfo()));
       return TRANSITION_TO_STANDBY_RESP;
     } catch(IOException e) {
       throw new ServiceException(e);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java
new file mode 100644
index 0000000..62896fa8
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha.protocolPB;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import javax.net.SocketFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.ZKFCProtocol;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolTranslator;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+
+public class ZKFCProtocolClientSideTranslatorPB implements
+  ZKFCProtocol, Closeable, ProtocolTranslator {
+
+  private final static RpcController NULL_CONTROLLER = null;
+  private final ZKFCProtocolPB rpcProxy;
+
+  public ZKFCProtocolClientSideTranslatorPB(
+      InetSocketAddress addr, Configuration conf,
+      SocketFactory socketFactory, int timeout) throws IOException {
+    RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
+        ProtobufRpcEngine.class);
+    rpcProxy = RPC.getProxy(ZKFCProtocolPB.class,
+        RPC.getProtocolVersion(ZKFCProtocolPB.class), addr,
+        UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
+  }
+
+  @Override
+  public void cedeActive(int millisToCede) throws IOException,
+      AccessControlException {
+    try {
+      CedeActiveRequestProto req = CedeActiveRequestProto.newBuilder()
+          .setMillisToCede(millisToCede)
+          .build();
+      rpcProxy.cedeActive(NULL_CONTROLLER, req);      
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void gracefulFailover() throws IOException, AccessControlException {
+    try {
+      rpcProxy.gracefulFailover(NULL_CONTROLLER,
+          GracefulFailoverRequestProto.getDefaultInstance());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+
+  @Override
+  public void close() {
+    RPC.stopProxy(rpcProxy);
+  }
+
+  @Override
+  public Object getUnderlyingProxyObject() {
+    return rpcProxy;
+  }
+}
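
A hedged sketch of using the new translator directly; in practice it is normally reached through HAServiceTarget.getZKFCProxy(). The host and port below are placeholders, and the sketch class is hypothetical.

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.protocolPB.ZKFCProtocolClientSideTranslatorPB;
    import org.apache.hadoop.net.NetUtils;

    class CedeActiveSketch {
      static void cedeForTenSeconds() throws IOException {
        Configuration conf = new Configuration();
        InetSocketAddress zkfcAddr =
            new InetSocketAddress("zkfc.example.com", 8019); // placeholder address
        ZKFCProtocolClientSideTranslatorPB proxy =
            new ZKFCProtocolClientSideTranslatorPB(
                zkfcAddr, conf, NetUtils.getDefaultSocketFactory(conf), 15000);
        try {
          proxy.cedeActive(10000); // drop out of the election for 10s, rejoin if healthy
        } finally {
          proxy.close();
        }
      }
    }
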
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolPB.java
new file mode 100644
index 0000000..348004f
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolPB.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha.protocolPB;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.KerberosInfo;
+
+@KerberosInfo(
+    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
+@ProtocolInfo(protocolName = "org.apache.hadoop.ha.ZKFCProtocol", 
+    protocolVersion = 1)
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface ZKFCProtocolPB extends
+    ZKFCProtocolService.BlockingInterface, VersionedProtocol {
+  /**
+   * If any methods need annotations, they can be added here
+   */
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java
new file mode 100644
index 0000000..5494998
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha.protocolPB;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ha.ZKFCProtocol;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveResponseProto;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto;
+import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverResponseProto;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class ZKFCProtocolServerSideTranslatorPB implements
+    ZKFCProtocolPB {
+  private final ZKFCProtocol server;
+  
+  public ZKFCProtocolServerSideTranslatorPB(ZKFCProtocol server) {
+    this.server = server;
+  }
+
+  @Override
+  public CedeActiveResponseProto cedeActive(RpcController controller,
+      CedeActiveRequestProto request) throws ServiceException {
+    try {
+      server.cedeActive(request.getMillisToCede());
+      return CedeActiveResponseProto.getDefaultInstance();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GracefulFailoverResponseProto gracefulFailover(
+      RpcController controller, GracefulFailoverRequestProto request)
+      throws ServiceException {
+    try {
+      server.gracefulFailover();
+      return GracefulFailoverResponseProto.getDefaultInstance();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public long getProtocolVersion(String protocol, long clientVersion)
+      throws IOException {
+    return RPC.getProtocolVersion(ZKFCProtocolPB.class);
+  }
+
+  @Override
+  public ProtocolSignature getProtocolSignature(String protocol,
+      long clientVersion, int clientMethodsHash) throws IOException {
+    if (!protocol.equals(RPC.getProtocolName(ZKFCProtocolPB.class))) {
+      throw new IOException("Serverside implements " +
+          RPC.getProtocolName(ZKFCProtocolPB.class) +
+          ". The following requested protocol is unknown: " + protocol);
+    }
+
+    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
+        RPC.getProtocolVersion(ZKFCProtocolPB.class),
+        ZKFCProtocolPB.class);
+  }
+
+}
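
A short sketch of how this translator is typically wrapped into a protobuf BlockingService for an RPC server. The actual wiring lives in ZKFCRpcServer, which is not part of this hunk, so treat this as an assumption-level illustration.

    import com.google.protobuf.BlockingService;
    import org.apache.hadoop.ha.ZKFCProtocol;
    import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService;
    import org.apache.hadoop.ha.protocolPB.ZKFCProtocolServerSideTranslatorPB;

    class ZKFCServiceWiringSketch {
      static BlockingService wrap(ZKFCProtocol zkfcImpl) {
        ZKFCProtocolServerSideTranslatorPB translator =
            new ZKFCProtocolServerSideTranslatorPB(zkfcImpl);
        // The generated service adapter is what gets registered with the RPC server.
        return ZKFCProtocolService.newReflectiveBlockingService(translator);
      }
    }
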
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index be4f26f..ded6870 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -96,7 +96,7 @@
   // The ServletContext attribute where the daemon Configuration
   // gets stored.
   public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf";
-  static final String ADMINS_ACL = "admins.acl";
+  public static final String ADMINS_ACL = "admins.acl";
   public static final String SPNEGO_FILTER = "SpnegoFilter";
 
   public static final String BIND_ADDRESS = "bind.address";
@@ -792,7 +792,7 @@
    * 
    * @param servletContext
    * @param request
-   * @param response
+   * @param response used to send the error response if the user does not have admin access.
    * @return true if admin-authorized, false otherwise
    * @throws IOException
    */
@@ -814,18 +814,33 @@
                          "authorized to access this page.");
       return false;
     }
+    
+    if (servletContext.getAttribute(ADMINS_ACL) != null &&
+        !userHasAdministratorAccess(servletContext, remoteUser)) {
+      response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
+          + remoteUser + " is unauthorized to access this page.");
+      return false;
+    }
+
+    return true;
+  }
+
+  /**
+   * Get the admin ACLs from the given ServletContext and check if the given
+   * user is in the ACL.
+   * 
+   * @param servletContext the context containing the admin ACL.
+   * @param remoteUser the remote user to check for.
+   * @return true if the user is present in the ACL, false if no ACL is set or
+   *         the user is not present
+   */
+  public static boolean userHasAdministratorAccess(ServletContext servletContext,
+      String remoteUser) {
     AccessControlList adminsAcl = (AccessControlList) servletContext
         .getAttribute(ADMINS_ACL);
     UserGroupInformation remoteUserUGI =
         UserGroupInformation.createRemoteUser(remoteUser);
-    if (adminsAcl != null) {
-      if (!adminsAcl.isUserAllowed(remoteUserUGI)) {
-        response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
-            + remoteUser + " is unauthorized to access this page.");
-        return false;
-      }
-    }
-    return true;
+    return adminsAcl != null && adminsAcl.isUserAllowed(remoteUserUGI);
   }
 
   /**
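
A hedged sketch of how a servlet outside HttpServer can reuse the ACL check through the newly public userHasAdministratorAccess(); the servlet itself is hypothetical.

    import java.io.IOException;
    import javax.servlet.http.HttpServlet;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;
    import org.apache.hadoop.http.HttpServer;

    public class AdminOnlyServletSketch extends HttpServlet {
      @Override
      protected void doGet(HttpServletRequest req, HttpServletResponse resp)
          throws IOException {
        String remoteUser = req.getRemoteUser();
        if (remoteUser != null &&
            HttpServer.userHasAdministratorAccess(getServletContext(), remoteUser)) {
          resp.getWriter().println("ok"); // privileged content would go here
        } else {
          resp.sendError(HttpServletResponse.SC_UNAUTHORIZED,
              "User " + remoteUser + " is not an administrator");
        }
      }
    }
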
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
index f1ee20c..9ca5b92 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
@@ -37,15 +37,15 @@
 
 import javax.servlet.Filter;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
+
 /**
  * Provides a servlet filter that pretends to authenticate a fake user (Dr.Who)
  * so that the web UI is usable for a secure cluster without authentication.
  */
 public class StaticUserWebFilter extends FilterInitializer {
   static final String DEPRECATED_UGI_KEY = "dfs.web.ugi";
-  
-  static final String USERNAME_KEY = "hadoop.http.staticuser.user";
-  static final String USERNAME_DEFAULT = "dr.who";
 
   private static final Log LOG = LogFactory.getLog(StaticUserWebFilter.class);
 
@@ -112,7 +112,7 @@
 
     @Override
     public void init(FilterConfig conf) throws ServletException {
-      this.username = conf.getInitParameter(USERNAME_KEY);
+      this.username = conf.getInitParameter(HADOOP_HTTP_STATIC_USER);
       this.user = new User(username);
     }
     
@@ -123,7 +123,7 @@
     HashMap<String, String> options = new HashMap<String, String>();
     
     String username = getUsernameFromConf(conf);
-    options.put(USERNAME_KEY, username);
+    options.put(HADOOP_HTTP_STATIC_USER, username);
 
     container.addFilter("static_user_filter", 
                         StaticUserFilter.class.getName(), 
@@ -139,11 +139,12 @@
       // We can't use the normal configuration deprecation mechanism here
       // since we need to split out the username from the configured UGI.
       LOG.warn(DEPRECATED_UGI_KEY + " should not be used. Instead, use " + 
-               USERNAME_KEY + ".");
+          HADOOP_HTTP_STATIC_USER + ".");
       String[] parts = oldStyleUgi.split(",");
       return parts[0];
     } else {
-      return conf.get(USERNAME_KEY, USERNAME_DEFAULT);
+      return conf.get(HADOOP_HTTP_STATIC_USER,
+        DEFAULT_HADOOP_HTTP_STATIC_USER);
     }
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 0919563..f32d399 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -807,7 +807,7 @@
   }
   
   /** Write key/value pairs to a sequence-format file. */
-  public static class Writer implements java.io.Closeable {
+  public static class Writer implements java.io.Closeable, Syncable {
     private Configuration conf;
     FSDataOutputStream out;
     boolean ownOutputStream = true;
@@ -1193,13 +1193,31 @@
       }
     }
 
-    /** flush all currently written data to the file system */
+    /**
+     * flush all currently written data to the file system
+     * @deprecated Use {@link #hsync()} or {@link #hflush()} instead
+     */
+    @Deprecated
     public void syncFs() throws IOException {
       if (out != null) {
         out.hflush();  // flush contents to file system
       }
     }
 
+    @Override
+    public void hsync() throws IOException {
+      if (out != null) {
+        out.hsync();
+      }
+    }
+
+    @Override
+    public void hflush() throws IOException {
+      if (out != null) {
+        out.hflush();
+      }
+    }
+    
     /** Returns the configuration of this file. */
     Configuration getConf() { return conf; }
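
A small sketch of the new Syncable usage that replaces the deprecated syncFs(); the file path and key/value types are only examples, and the sketch class is hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    class SyncableWriterSketch {
      static void writeDurably(Configuration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        SequenceFile.Writer writer = SequenceFile.createWriter(
            fs, conf, new Path("/tmp/events.seq"), LongWritable.class, Text.class);
        try {
          writer.append(new LongWritable(1L), new Text("started"));
          writer.hflush(); // make the data visible to readers
          writer.hsync();  // additionally request a sync to durable storage
        } finally {
          writer.close();
        }
      }
    }
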
     
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
index 0bee332..78748b0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
@@ -236,6 +236,11 @@
 
   /**
    * Clear the string to empty.
+   *
+   * <em>Note</em>: For performance reasons, this call does not clear the
+   * underlying byte array that is retrievable via {@link #getBytes()}.
+   * In order to free the byte-array memory, call {@link #set(byte[])}
+   * with an empty byte array (for example, <code>new byte[0]</code>).
    */
   public void clear() {
     length = 0;
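
A tiny sketch illustrating the note above: clear() keeps the backing buffer, while set(new byte[0]) releases it. The sketch class is hypothetical.

    import org.apache.hadoop.io.Text;

    class TextClearSketch {
      static void demo() {
        Text t = new Text("a fairly large payload that grows the backing array");
        t.clear();
        // t.getLength() is now 0, but t.getBytes().length still reflects the old capacity.
        t.set(new byte[0]); // drops the large backing array so it can be garbage collected
      }
    }
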
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java
index 39557a1..f0fe6fb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java
@@ -39,20 +39,23 @@
  * <p>Example:</p>
  * <p><blockquote><pre>
  *     public class MyWritable implements Writable {
- *       // Some data     
+ *       // Some data
  *       private int counter;
  *       private long timestamp;
- *       
+ *
+ *       // Default constructor to allow (de)serialization
+ *       MyWritable() { }
+ *
  *       public void write(DataOutput out) throws IOException {
  *         out.writeInt(counter);
  *         out.writeLong(timestamp);
  *       }
- *       
+ *
  *       public void readFields(DataInput in) throws IOException {
  *         counter = in.readInt();
  *         timestamp = in.readLong();
  *       }
- *       
+ *
  *       public static MyWritable read(DataInput in) throws IOException {
  *         MyWritable w = new MyWritable();
  *         w.readFields(in);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
index e12dcfe..dc95e9e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
@@ -109,8 +109,12 @@
     List<Class<? extends CompressionCodec>> result
       = new ArrayList<Class<? extends CompressionCodec>>();
     // Add codec classes discovered via service loading
-    for (CompressionCodec codec : CODEC_PROVIDERS) {
-      result.add(codec.getClass());
+    synchronized (CODEC_PROVIDERS) {
+      // CODEC_PROVIDERS is a lazy collection. Synchronize so it is
+      // thread-safe. See HADOOP-8406.
+      for (CompressionCodec codec : CODEC_PROVIDERS) {
+        result.add(codec.getClass());
+      }
     }
     // Add codec classes from configuration
     String codecsString = conf.get("io.compression.codecs");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 0831413..ef32cfd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -53,6 +53,8 @@
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
@@ -845,24 +847,24 @@
       touch();
       
       try {
-        int id = in.readInt();                    // try to read an id
-
+        RpcResponseHeaderProto response = 
+            RpcResponseHeaderProto.parseDelimitedFrom(in);
+        int callId = response.getCallId();
         if (LOG.isDebugEnabled())
-          LOG.debug(getName() + " got value #" + id);
+          LOG.debug(getName() + " got value #" + callId);
 
-        Call call = calls.get(id);
-
-        int state = in.readInt();     // read call status
-        if (state == Status.SUCCESS.state) {
+        Call call = calls.get(callId);
+        RpcStatusProto status = response.getStatus();
+        if (status == RpcStatusProto.SUCCESS) {
           Writable value = ReflectionUtils.newInstance(valueClass, conf);
           value.readFields(in);                 // read value
           call.setRpcResponse(value);
-          calls.remove(id);
-        } else if (state == Status.ERROR.state) {
+          calls.remove(callId);
+        } else if (status == RpcStatusProto.ERROR) {
           call.setException(new RemoteException(WritableUtils.readString(in),
                                                 WritableUtils.readString(in)));
-          calls.remove(id);
-        } else if (state == Status.FATAL.state) {
+          calls.remove(callId);
+        } else if (status == RpcStatusProto.FATAL) {
           // Close the connection
           markClosed(new RemoteException(WritableUtils.readString(in), 
                                          WritableUtils.readString(in)));
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 2d3f91e..1338419 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -396,24 +396,44 @@
        * it is.</li>
        * </ol>
        */
-      public Writable call(RPC.Server server, String protocol,
+      public Writable call(RPC.Server server, String connectionProtocolName,
           Writable writableRequest, long receiveTime) throws Exception {
         RpcRequestWritable request = (RpcRequestWritable) writableRequest;
         HadoopRpcRequestProto rpcRequest = request.message;
         String methodName = rpcRequest.getMethodName();
-        String protoName = rpcRequest.getDeclaringClassProtocolName();
+        
+        
+        /**
+         * RPCs for a particular interface (i.e. protocol) are done using an
+         * IPC connection that is set up via rpcProxy. The rpcProxy has a
+         * declared protocol name that is sent from client to server at
+         * connection time.
+         *
+         * Each RPC call also sends a protocol name
+         * (called declaringClassProtocolName). This name is usually the same
+         * as the connection protocol name, except in some cases.
+         * For example, meta-protocols such as ProtocolInfoProto, which return
+         * information about the protocol, reuse the connection but need to
+         * indicate that the actual protocol is different; in this case the
+         * declaringClassProtocolName field is set to ProtocolInfoProto.
+         */
+
+        String declaringClassProtoName = 
+            rpcRequest.getDeclaringClassProtocolName();
         long clientVersion = rpcRequest.getClientProtocolVersion();
         if (server.verbose)
-          LOG.info("Call: protocol=" + protocol + ", method=" + methodName);
+          LOG.info("Call: connectionProtocolName=" + connectionProtocolName + 
+              ", method=" + methodName);
         
-        ProtoClassProtoImpl protocolImpl = getProtocolImpl(server, protoName,
-            clientVersion);
+        ProtoClassProtoImpl protocolImpl = getProtocolImpl(server, 
+                              declaringClassProtoName, clientVersion);
         BlockingService service = (BlockingService) protocolImpl.protocolImpl;
         MethodDescriptor methodDescriptor = service.getDescriptorForType()
             .findMethodByName(methodName);
         if (methodDescriptor == null) {
-          String msg = "Unknown method " + methodName + " called on " + protocol
-              + " protocol.";
+          String msg = "Unknown method " + methodName + " called on " 
+                                + connectionProtocolName + " protocol.";
           LOG.warn(msg);
           throw new RpcServerException(msg);
         }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 6369f3a..3173ad0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1339,7 +1339,7 @@
               + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
               + ") is configured as simple. Please configure another method "
               + "like kerberos or digest.");
-            setupResponse(authFailedResponse, authFailedCall, Status.FATAL,
+            setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL,
                 null, ae.getClass().getName(), ae.getMessage());
             responder.doRespond(authFailedCall);
             throw ae;
@@ -1420,7 +1420,7 @@
         Call fakeCall =  new Call(-1, null, this);
         // Versions 3 and greater can interpret this exception
         // response in the same manner
-        setupResponse(buffer, fakeCall, Status.FATAL,
+        setupResponseOldVersionFatal(buffer, fakeCall,
             null, VersionMismatch.class.getName(), errMsg);
 
         responder.doRespond(fakeCall);
@@ -1443,7 +1443,7 @@
       ByteArrayOutputStream buffer = new ByteArrayOutputStream();
 
       Call fakeCall = new Call(-1, null, this);
-      setupResponse(buffer, fakeCall, Status.FATAL, null,
+      setupResponse(buffer, fakeCall, RpcStatusProto.FATAL, null,
           IpcException.class.getName(), errMsg);
       responder.doRespond(fakeCall);
     }
@@ -1579,7 +1579,7 @@
             new Call(header.getCallId(), null, this);
         ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 
-        setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
+        setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null,
             IOException.class.getName(),
             "Unknown rpc kind "  + header.getRpcKind());
         responder.doRespond(readParamsFailedCall);
@@ -1597,7 +1597,7 @@
             new Call(header.getCallId(), null, this);
         ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 
-        setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
+        setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null,
             t.getClass().getName(),
             "IPC server unable to read call parameters: " + t.getMessage());
         responder.doRespond(readParamsFailedCall);
@@ -1627,7 +1627,7 @@
         rpcMetrics.incrAuthorizationSuccesses();
       } catch (AuthorizationException ae) {
         rpcMetrics.incrAuthorizationFailures();
-        setupResponse(authFailedResponse, authFailedCall, Status.FATAL, null,
+        setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL, null,
             ae.getClass().getName(), ae.getMessage());
         responder.doRespond(authFailedCall);
         return false;
@@ -1725,8 +1725,8 @@
             // responder.doResponse() since setupResponse may use
             // SASL to encrypt response data and SASL enforces
             // its own message ordering.
-            setupResponse(buf, call, (error == null) ? Status.SUCCESS
-                : Status.ERROR, value, errorClass, error);
+            setupResponse(buf, call, (error == null) ? RpcStatusProto.SUCCESS
+                : RpcStatusProto.ERROR, value, errorClass, error);
             
             // Discard the large buf and reset it back to smaller size 
             // to free up heap
@@ -1859,41 +1859,80 @@
   /**
    * Setup response for the IPC Call.
    * 
-   * @param response buffer to serialize the response into
+   * @param responseBuf buffer to serialize the response into
    * @param call {@link Call} to which we are setting up the response
-   * @param status {@link Status} of the IPC call
+   * @param status of the IPC call
    * @param rv return value for the IPC Call, if the call was successful
    * @param errorClass error class, if the the call failed
    * @param error error message, if the call failed
    * @throws IOException
    */
-  private void setupResponse(ByteArrayOutputStream response, 
-                             Call call, Status status, 
+  private void setupResponse(ByteArrayOutputStream responseBuf,
+                             Call call, RpcStatusProto status, 
                              Writable rv, String errorClass, String error) 
   throws IOException {
-    response.reset();
-    DataOutputStream out = new DataOutputStream(response);
-    out.writeInt(call.callId);                // write call id
-    out.writeInt(status.state);           // write status
+    responseBuf.reset();
+    DataOutputStream out = new DataOutputStream(responseBuf);
+    RpcResponseHeaderProto.Builder response =  
+        RpcResponseHeaderProto.newBuilder();
+    response.setCallId(call.callId);
+    response.setStatus(status);
 
-    if (status == Status.SUCCESS) {
+
+    if (status == RpcStatusProto.SUCCESS) {
       try {
+        response.build().writeDelimitedTo(out);
         rv.write(out);
       } catch (Throwable t) {
         LOG.warn("Error serializing call response for call " + call, t);
         // Call back to same function - this is OK since the
         // buffer is reset at the top, and since status is changed
         // to ERROR it won't infinite loop.
-        setupResponse(response, call, Status.ERROR,
+        setupResponse(responseBuf, call, RpcStatusProto.ERROR,
             null, t.getClass().getName(),
             StringUtils.stringifyException(t));
         return;
       }
     } else {
+      if (status == RpcStatusProto.FATAL) {
+        response.setServerIpcVersionNum(Server.CURRENT_VERSION);
+      }
+      response.build().writeDelimitedTo(out);
       WritableUtils.writeString(out, errorClass);
       WritableUtils.writeString(out, error);
     }
     if (call.connection.useWrap) {
+      wrapWithSasl(responseBuf, call);
+    }
+    call.setResponse(ByteBuffer.wrap(responseBuf.toByteArray()));
+  }
+  
+  /**
+   * Setup response for the IPC Call on Fatal Error from a 
+   * client that is using old version of Hadoop.
+   * The response is serialized using the previous protocol's response
+   * layout.
+   * 
+   * @param response buffer to serialize the response into
+   * @param call {@link Call} to which we are setting up the response
+   * @param rv return value for the IPC Call, if the call was successful
+   * @param errorClass error class, if the call failed
+   * @param error error message, if the call failed
+   * @throws IOException
+   */
+  private void setupResponseOldVersionFatal(ByteArrayOutputStream response, 
+                             Call call,
+                             Writable rv, String errorClass, String error) 
+  throws IOException {
+    final int OLD_VERSION_FATAL_STATUS = -1;
+    response.reset();
+    DataOutputStream out = new DataOutputStream(response);
+    out.writeInt(call.callId);                // write call id
+    out.writeInt(OLD_VERSION_FATAL_STATUS);   // write FATAL_STATUS
+    WritableUtils.writeString(out, errorClass);
+    WritableUtils.writeString(out, error);
+
+    if (call.connection.useWrap) {
       wrapWithSasl(response, call);
     }
     call.setResponse(ByteBuffer.wrap(response.toByteArray()));
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Status.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Status.java
deleted file mode 100644
index 16fd871..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Status.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-/**
- * Status of a Hadoop IPC call.
- */
-enum Status {
-  SUCCESS (0),
-  ERROR (1),
-  FATAL (-1);
-  
-  int state;
-  private Status(int state) {
-    this.state = state;
-  }
-}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
index 8dc83a3..84dc15c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
@@ -34,6 +34,7 @@
 import javax.management.MalformedObjectNameException;
 import javax.management.ObjectName;
 import javax.management.ReflectionException;
+import javax.management.RuntimeErrorException;
 import javax.management.RuntimeMBeanException;
 import javax.management.openmbean.CompositeData;
 import javax.management.openmbean.CompositeType;
@@ -317,6 +318,11 @@
         LOG.error("getting attribute "+attName+" of "+oname+" threw an exception", e);
       }
       return;
+    } catch (RuntimeErrorException e) {
+      // RuntimeErrorException happens when an unexpected failure occurs in getAttribute
+      // for example https://issues.apache.org/jira/browse/DAEMON-120
+      LOG.debug("getting attribute "+attName+" of "+oname+" threw an exception", e);
+      return;
     } catch (AttributeNotFoundException e) {
       //Ignored the attribute was not found, which should never happen because the bean
       //just told us that it has this attribute, but if this happens just don't output
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java
index ac57ba4..63ee057 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java
@@ -40,6 +40,7 @@
    * @param location the location
    */
   public void setNetworkLocation(String location);
+
   /** @return this node's name */
   public String getName();
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java
index a8f2781..a61054d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java
@@ -110,7 +110,7 @@
    * @return the path of a node
    */
   public static String getPath(Node node) {
-    return node.getNetworkLocation()+PATH_SEPARATOR_STR+node.getName();
+    return node.getNetworkLocation() + PATH_SEPARATOR_STR + node.getName();
   }
   
   /** @return this node's path as its string representation */
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index e95ade8..bbddf6f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -18,10 +18,15 @@
 
 package org.apache.hadoop.security.token;
 
+import com.google.common.collect.Maps;
+
+import java.io.ByteArrayInputStream;
 import java.io.DataInput;
+import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Map;
 import java.util.ServiceLoader;
 
 import org.apache.commons.codec.binary.Base64;
@@ -37,6 +42,7 @@
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.util.ReflectionUtils;
 
 /**
  * The client-side form of the token.
@@ -45,6 +51,9 @@
 @InterfaceStability.Evolving
 public class Token<T extends TokenIdentifier> implements Writable {
   public static final Log LOG = LogFactory.getLog(Token.class);
+  
+  private static Map<Text, Class<? extends TokenIdentifier>> tokenKindMap;
+  
   private byte[] identifier;
   private byte[] password;
   private Text kind;
@@ -100,13 +109,49 @@
   }
 
   /**
-   * Get the token identifier
-   * @return the token identifier
+   * Get the token identifier's byte representation
+   * @return the token identifier's byte representation
    */
   public byte[] getIdentifier() {
     return identifier;
   }
   
+  private static synchronized Class<? extends TokenIdentifier>
+      getClassForIdentifier(Text kind) {
+    if (tokenKindMap == null) {
+      tokenKindMap = Maps.newHashMap();
+      for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
+        tokenKindMap.put(id.getKind(), id.getClass());
+      }
+    }
+    Class<? extends TokenIdentifier> cls = tokenKindMap.get(kind);
+    if (cls == null) {
+      LOG.warn("Cannot find class for token kind " + kind);
+      return null;
+    }
+    return cls;
+  }
+  
+  /**
+   * Get the token identifier object, or null if it could not be constructed
+   * (because the class could not be loaded, for example).
+   * @return the token identifier, or null
+   * @throws IOException if the identifier bytes cannot be deserialized
+   */
+  @SuppressWarnings("unchecked")
+  public T decodeIdentifier() throws IOException {
+    Class<? extends TokenIdentifier> cls = getClassForIdentifier(getKind());
+    if (cls == null) {
+      return null;
+    }
+    TokenIdentifier tokenIdentifier = ReflectionUtils.newInstance(cls, null);
+    ByteArrayInputStream buf = new ByteArrayInputStream(identifier);
+    DataInputStream in = new DataInputStream(buf);  
+    tokenIdentifier.readFields(in);
+    in.close();
+    return (T) tokenIdentifier;
+  }
+  
   /**
    * Get the token password/secret
    * @return the token password/secret
@@ -260,16 +305,31 @@
       buffer.append(num);
     }
   }
+  
+  private void identifierToString(StringBuilder buffer) {
+    T id = null;
+    try {
+      id = decodeIdentifier();
+    } catch (IOException e) {
+      // handle in the finally block
+    } finally {
+      if (id != null) {
+        buffer.append("(").append(id).append(")");
+      } else {
+        addBinaryBuffer(buffer, identifier);
+      }
+    }
+  }
 
   @Override
   public String toString() {
     StringBuilder buffer = new StringBuilder();
-    buffer.append("Ident: ");
-    addBinaryBuffer(buffer, identifier);
-    buffer.append(", Kind: ");
+    buffer.append("Kind: ");
     buffer.append(kind.toString());
     buffer.append(", Service: ");
     buffer.append(service.toString());
+    buffer.append(", Ident: ");
+    identifierToString(buffer);
     return buffer.toString();
   }
   
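
The new Token#decodeIdentifier() gives callers a typed view of the raw identifier bytes, and toString() now uses it to print a readable identifier whenever the matching TokenIdentifier class can be loaded via ServiceLoader. A minimal illustrative sketch of calling it; the wrapper class and method names below are invented for the example:

import java.io.IOException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenDecodeExample {
  // Print a readable form of the token's identifier when its class is on the classpath.
  static void describe(Token<? extends TokenIdentifier> token) throws IOException {
    TokenIdentifier id = token.decodeIdentifier();  // null when no TokenIdentifier service matches this kind
    if (id != null) {
      System.out.println("Decoded identifier: " + id);
    } else {
      // Fall back to the token's own toString(), which now prints Kind and Service before the identifier
      System.out.println("Unknown token kind: " + token);
    }
  }
}
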
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
index e7f16fe..d8c731e 100644
--- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
@@ -48,10 +48,14 @@
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_NAMENODE_OPTS"
+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
 export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+# The ZKFC does not need a large heap, and keeping it small avoids
+# any potential for long GC pauses
+export HADOOP_ZKFC_OPTS="-Xmx256m $HADOOP_ZKFC_OPTS"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-policy.xml
index 2fd9f8d..131fecf 100644
--- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-policy.xml
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-policy.xml
@@ -223,6 +223,12 @@
     <description>ACL for HAService protocol used by HAAdmin to manage the
       active and stand-by states of namenode.</description>
   </property>
+  <property>
+    <name>security.zkfc.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for access to the ZK Failover Controller
+    </description>
+  </property>
 
    <property>
       <name>security.mrhs.client.protocol.acl</name>
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
index 3470b3e..63e27cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
@@ -102,7 +102,7 @@
 #
 #Security appender
 #
-hadoop.security.logger=INFO,console
+hadoop.security.logger=INFO,NullAppender
 hadoop.security.log.maxfilesize=256MB
 hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
@@ -126,7 +126,7 @@
 #
 # hdfs audit logging
 #
-hdfs.audit.logger=INFO,console
+hdfs.audit.logger=INFO,NullAppender
 hdfs.audit.log.maxfilesize=256MB
 hdfs.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
@@ -141,7 +141,7 @@
 #
 # mapred audit logging
 #
-mapred.audit.logger=INFO,console
+mapred.audit.logger=INFO,NullAppender
 mapred.audit.log.maxfilesize=256MB
 mapred.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto b/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
index 70ba82b..70ecac8 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
@@ -27,6 +27,16 @@
   STANDBY = 2;
 }
 
+enum HARequestSource {
+  REQUEST_BY_USER = 0;
+  REQUEST_BY_USER_FORCED = 1;
+  REQUEST_BY_ZKFC = 2;
+}
+
+message HAStateChangeRequestInfoProto {
+  required HARequestSource reqSource = 1;
+}
+
 /**
  * void request
  */
@@ -43,6 +53,7 @@
  * void request
  */
 message TransitionToActiveRequestProto { 
+  required HAStateChangeRequestInfoProto reqInfo = 1;
 }
 
 /**
@@ -55,6 +66,7 @@
  * void request
  */
 message TransitionToStandbyRequestProto { 
+  required HAStateChangeRequestInfoProto reqInfo = 1;
 }
 
 /**
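
The new HAStateChangeRequestInfoProto/HARequestSource fields correspond to the request-info argument that the Java-side HAServiceProtocol methods now take (see the DummyHAService change later in this patch, where transitionToActive/transitionToStandby accept a StateChangeRequestInfo). A hedged sketch of a manual transition request; the StateChangeRequestInfo and RequestSource Java names are assumed to mirror the proto definitions:

import java.io.IOException;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.security.AccessControlException;

public class ManualTransitionExample {
  // Ask the given service to become active, recording that a user (not the ZKFC) requested it.
  static void makeActive(HAServiceProtocol proxy)
      throws ServiceFailedException, AccessControlException, IOException {
    StateChangeRequestInfo req = new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER);
    proxy.transitionToActive(req);
  }
}
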
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto b/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto
index 42dea3b..5065741 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto
@@ -19,7 +19,6 @@
 option java_outer_classname = "RpcPayloadHeaderProtos";
 option java_generate_equals_and_hash = true;
 
-
 /**
  * This is the rpc payload header. It is sent with every rpc call.
  * 
@@ -34,8 +33,6 @@
  *
  */
 
-
-
 /**
  * RpcKind determine the rpcEngine and the serialization of the rpc payload
  */
@@ -54,5 +51,27 @@
 message RpcPayloadHeaderProto { // the header for the RpcRequest
   optional RpcKindProto rpcKind = 1;
   optional RpcPayloadOperationProto rpcOp = 2;
-  optional uint32 callId = 3; // each rpc has a callId that is also used in response
+  required uint32 callId = 3; // each rpc has a callId that is also used in response
+}
+
+enum RpcStatusProto {
+  SUCCESS = 0;  // RPC succeeded
+  ERROR = 1;    // RPC failed
+  FATAL = 2;    // Fatal error - connection is closed
+}
+
+/**
+ * Rpc Response Header
+ *    - If successful then the Response follows after this header
+ *        - length (4 byte int), followed by the response
+ *    - If error or fatal - the exception info follows
+ *        - length (4 byte int) Class name of exception - UTF-8 string
+ *        - length (4 byte int) Stacktrace - UTF-8 string
+ *        - if the strings are null then the length is -1
+ * In case of a fatal error the response contains the server-side IPC version
+ */
+message RpcResponseHeaderProto {
+  required uint32 callId = 1; // callId used in Request
+  required RpcStatusProto status = 2;
+  optional uint32 serverIpcVersionNum = 3; // in case of a fatal IPC error
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto b/hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto
new file mode 100644
index 0000000..1037b02
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.ha.proto";
+option java_outer_classname = "ZKFCProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+message CedeActiveRequestProto {
+  required uint32 millisToCede = 1;
+}
+
+message CedeActiveResponseProto {
+}
+
+message GracefulFailoverRequestProto {
+}
+
+message GracefulFailoverResponseProto {
+}
+
+
+/**
+ * Protocol provides manual control of the ZK Failover Controllers
+ */
+service ZKFCProtocolService {
+  /**
+   * Request that the service cede its active state, and quit the election
+   * for some amount of time
+   */
+  rpc cedeActive(CedeActiveRequestProto)
+      returns(CedeActiveResponseProto);
+
+
+  rpc gracefulFailover(GracefulFailoverRequestProto)
+      returns(GracefulFailoverResponseProto);
+}
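
These two RPCs map onto the ZKFCProtocol Java interface that HAServiceTarget now exposes through getZKFCProxy(conf, timeout) (visible in the DummyHAService change later in this patch). A hedged sketch of how an admin tool might ask a node's ZKFC to cede the active role for a while; the exact Java-side cedeActive parameter is assumed to mirror millisToCede:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.ZKFCProtocol;

public class CedeActiveExample {
  // Ask the target's ZKFC to quit the election for ~10 seconds so another node can take over.
  static void cedeForTenSeconds(HAServiceTarget target, Configuration conf) throws IOException {
    ZKFCProtocol zkfc = target.getZKFCProxy(conf, 15000);  // 15 second RPC timeout
    zkfc.cedeActive(10000);                                // millisToCede, as in CedeActiveRequestProto
  }
}
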
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/hadoop_rpc.proto b/hadoop-common-project/hadoop-common/src/main/proto/hadoop_rpc.proto
index 41d075c..d694e22 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/hadoop_rpc.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/hadoop_rpc.proto
@@ -38,7 +38,21 @@
   /** Bytes corresponding to the client protobuf request */
   optional bytes request = 2;
   
-  /** protocol name of class declaring the called method */ 
+  /** 
+   * RPCs for a particular interface (ie protocol) are done using a
+   * IPC connection that is setup using rpcProxy.
+   * The rpcProxy's has a declared protocol name that is 
+   * sent form client to server at connection time. 
+   * 
+   * Each Rpc call also sends a protocol name 
+   * (called declaringClassprotocolName). This name is usually the same
+   * as the connection protocol name except in some cases. 
+   * For example metaProtocols such ProtocolInfoProto which get metainfo
+   * about the protocol reuse the connection but need to indicate that
+   * the actual protocol is different (i.e. the protocol is
+   * ProtocolInfoProto) since they reuse the connection; in this case
+   * the declaringClassProtocolName field is set to the ProtocolInfoProto
+   */
   required string declaringClassProtocolName = 3;
   
   /** protocol version of class declaring the called method */
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index f94e497..1e72e36 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -944,4 +944,74 @@
   </description>
 </property>
 
+<property>
+  <name>ha.zookeeper.quorum</name>
+  <description>
+    A list of ZooKeeper server addresses, separated by commas, that are
+    to be used by the ZKFailoverController in automatic failover.
+  </description>
+</property>
+
+<property>
+  <name>ha.zookeeper.session-timeout.ms</name>
+  <value>5000</value>
+  <description>
+    The session timeout to use when the ZKFC connects to ZooKeeper.
+    Setting this to a lower value means that server crashes
+    will be detected more quickly, but risks triggering failover too
+    aggressively in the case of a transient error or network blip.
+  </description>
+</property>
+
+<property>
+  <name>ha.zookeeper.parent-znode</name>
+  <value>/hadoop-ha</value>
+  <description>
+    The ZooKeeper znode under which the ZK failover controller stores
+    its information. Note that the nameservice ID is automatically
+    appended to this znode, so it is not normally necessary to
+    configure this, even in a federated environment.
+  </description>
+</property>
+
+<property>
+  <name>ha.zookeeper.acl</name>
+  <value>world:anyone:rwcda</value>
+  <description>
+    A comma-separated list of ZooKeeper ACLs to apply to the znodes
+    used by automatic failover. These ACLs are specified in the same
+    format as used by the ZooKeeper CLI.
+
+    If the ACL itself contains secrets, you may instead specify a
+    path to a file, prefixed with the '@' symbol, and the value of
+    this configuration will be loaded from within.
+  </description>
+</property>
+
+<property>
+  <name>ha.zookeeper.auth</name>
+  <value></value>
+  <description>
+    A comma-separated list of ZooKeeper authentications to add when
+    connecting to ZooKeeper. These are specified in the same format
+    as used by the &quot;addauth&quot; command in the ZK CLI. It is
+    important that the authentications specified here are sufficient
+    to access znodes with the ACL specified in ha.zookeeper.acl.
+
+    If the auths contain secrets, you may instead specify a
+    path to a file, prefixed with the '@' symbol, and the value of
+    this configuration will be loaded from within.
+  </description>
+</property>
+
+<!-- Static Web User Filter properties. -->
+<property>
+  <description>
+    The user name to filter as, on static web filters
+    while rendering content. An example use is the HDFS
+    web UI (user to be used for browsing files).
+  </description>
+  <name>hadoop.http.staticuser.user</name>
+  <value>dr.who</value>
+</property>
 </configuration>
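
The new ha.zookeeper.* keys are what the ZKFC reads to locate and secure its ZooKeeper state. A small sketch of pulling the values out of a Configuration; the key strings and defaults are taken verbatim from the entries above, everything else is illustrative:

import org.apache.hadoop.conf.Configuration;

public class ZkfcConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();   // picks up core-default.xml / core-site.xml

    String quorum = conf.get("ha.zookeeper.quorum");                        // no default; must be set for auto-failover
    int sessionTimeoutMs = conf.getInt("ha.zookeeper.session-timeout.ms", 5000);
    String parentZnode = conf.get("ha.zookeeper.parent-znode", "/hadoop-ha");

    System.out.println("quorum=" + quorum
        + ", sessionTimeout=" + sessionTimeoutMs
        + ", parentZnode=" + parentZnode);
  }
}
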
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index d15233e..34a1780 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -526,6 +526,29 @@
     }
   }
   
+  public void testDoubleValues() throws IOException {
+    out=new BufferedWriter(new FileWriter(CONFIG));
+    startConfig();
+    appendProperty("test.double1", "3.1415");
+    appendProperty("test.double2", "003.1415");
+    appendProperty("test.double3", "-3.1415");
+    appendProperty("test.double4", " -3.1415 ");
+    appendProperty("test.double5", "xyz-3.1415xyz");
+    endConfig();
+    Path fileResource = new Path(CONFIG);
+    conf.addResource(fileResource);
+    assertEquals(3.1415, conf.getDouble("test.double1", 0.0));
+    assertEquals(3.1415, conf.getDouble("test.double2", 0.0));
+    assertEquals(-3.1415, conf.getDouble("test.double3", 0.0));
+    assertEquals(-3.1415, conf.getDouble("test.double4", 0.0));
+    try {
+      conf.getDouble("test.double5", 0.0);
+      fail("Property had invalid double value, but was read successfully.");
+    } catch (NumberFormatException e) {
+      // pass
+    }
+  }
+  
   public void testGetClass() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -976,6 +999,15 @@
         "Not returning expected number of classes. Number of returned classes ="
             + classes.length, 0, classes.length);
   }
+
+  public void testInvalidSubstitution() {
+    String key = "test.random.key";
+    String keyExpression = "${" + key + "}";
+    Configuration configuration = new Configuration();
+    configuration.set(key, keyExpression);
+    String value = configuration.get(key);
+    assertTrue("Unexpected value " + value, value.equals(keyExpression));
+  }
   
   public static void main(String[] argv) throws Exception {
     junit.textui.TestRunner.main(new String[]{
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/empty-configuration.xml b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/empty-configuration.xml
index af69c44..e11f1b1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/empty-configuration.xml
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/empty-configuration.xml
@@ -1,3 +1,4 @@
+<?xml version="1.0"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -14,7 +15,6 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<?xml version="1.0"?>
 <configuration>
 </configuration>
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
index 6b3963b..f518846 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
@@ -75,7 +75,7 @@
   //A test filter with returns any path containing a "b" 
   final private static PathFilter TEST_X_FILTER = new PathFilter() {
     public boolean accept(Path file) {
-      if(file.getName().contains("x") || file.toString().contains("X"))
+      if(file.getName().contains("x") || file.getName().contains("X"))
         return true;
       else
         return false;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index b88f5b5..373cebd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -75,7 +75,7 @@
   //A test filter with returns any path containing a "b" 
   final private static PathFilter TEST_X_FILTER = new PathFilter() {
     public boolean accept(Path file) {
-      if(file.getName().contains("x") || file.toString().contains("X"))
+      if(file.getName().contains("x") || file.getName().contains("X"))
         return true;
       else
         return false;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
index 035a016..c175d53 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
@@ -113,7 +113,7 @@
 
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
       int blockSize, boolean createParent) throws IOException {
-      return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(), true);
+      return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(path), true);
   }
 
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index aeb926004..647a583 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -74,6 +74,11 @@
         Progressable progress) throws IOException {
       return null;
     }
+    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+            EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
+            Progressable progress) throws IOException {
+      return null;
+    }
     public boolean mkdirs(Path f) { return false; }
     public FSDataInputStream open(Path f) { return null; }
     public FSDataOutputStream create(Path f) { return null; }
@@ -123,6 +128,15 @@
         Progressable progress) {
       return null;
     }
+    public FSDataOutputStream create(Path f,
+        FsPermission permission,
+        EnumSet<CreateFlag> flags,
+        int bufferSize,
+        short replication,
+        long blockSize,
+        Progressable progress) throws IOException {
+      return null;
+    }
     public String getName() { return null; }
     public boolean delete(Path f) { return false; }
     public short getReplication(Path src) { return 0 ; }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java
index d245804..d1770d3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java
@@ -47,11 +47,9 @@
   }
   
   public void testBlockSize() throws Exception {
-    
-    long newBlockSize = fs.getDefaultBlockSize() * 2;
-    fs.getConf().setLong("fs.s3.block.size", newBlockSize);
-    
     Path file = path("/test/hadoop/file");
+    long newBlockSize = fs.getDefaultBlockSize(file) * 2;
+    fs.getConf().setLong("fs.s3.block.size", newBlockSize);
     createFile(file);
     assertEquals("Double default block size", newBlockSize,
 	fs.getFileStatus(file).getBlockSize());
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
index d98021b..60010e4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
@@ -141,11 +141,11 @@
   public void testBlockSize() throws Exception {
     Path file = path("/test/hadoop/file");
     createFile(file);
-    assertEquals("Default block size", fs.getDefaultBlockSize(),
+    assertEquals("Default block size", fs.getDefaultBlockSize(file),
     fs.getFileStatus(file).getBlockSize());
 
     // Block size is determined at read time
-    long newBlockSize = fs.getDefaultBlockSize() * 2;
+    long newBlockSize = fs.getDefaultBlockSize(file) * 2;
     fs.getConf().setLong("fs.s3n.block.size", newBlockSize);
     assertEquals("Double default block size", newBlockSize,
     fs.getFileStatus(file).getBlockSize());
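
Several of the test changes above switch from the no-argument getDefaultBlockSize()/getDefaultReplication() to the Path-qualified variants, presumably so that defaults are resolved against the file system that actually backs the given path. A tiny sketch of the Path-qualified calls; the local file system is used only for illustration:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathQualifiedDefaults {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path p = new Path("/tmp/example");
    // Path-qualified variants, as used in the updated tests
    System.out.println("blockSize=" + fs.getDefaultBlockSize(p)
        + ", replication=" + fs.getDefaultReplication(p));
  }
}
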
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java
new file mode 100644
index 0000000..0d165f1
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsConstants;
+import org.junit.Test;
+
+/**
+ * Test ViewFileSystem's support for having delegation tokens fetched and cached
+ * for the file system.
+ * 
+ * Currently this class just ensures that getCanonicalServiceName() always
+ * returns <code>null</code> for ViewFileSystem instances.
+ */
+public class TestViewFileSystemDelegationTokenSupport {
+  
+  private static final String MOUNT_TABLE_NAME = "vfs-cluster";
+
+  /**
+   * Regression test for HADOOP-8408.
+   */
+  @Test
+  public void testGetCanonicalServiceNameWithNonDefaultMountTable()
+      throws URISyntaxException, IOException {
+    
+    Configuration conf = new Configuration();
+    ConfigUtil.addLink(conf, MOUNT_TABLE_NAME, "/user", new URI("file:///"));
+    
+    FileSystem viewFs = FileSystem.get(new URI(FsConstants.VIEWFS_SCHEME +
+        "://" + MOUNT_TABLE_NAME), conf);
+    
+    String serviceName = viewFs.getCanonicalServiceName();
+    assertNull(serviceName);
+  }
+  
+  @Test
+  public void testGetCanonicalServiceNameWithDefaultMountTable()
+      throws URISyntaxException, IOException {
+    
+    Configuration conf = new Configuration();
+    ConfigUtil.addLink(conf, "/user", new URI("file:///"));
+    
+    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+    
+    String serviceName = viewFs.getCanonicalServiceName();
+    assertNull(serviceName);
+  }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java
index dc87ebd..bd9b40a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java
@@ -19,16 +19,25 @@
 
 import java.util.Arrays;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.apache.zookeeper.data.Stat;
 import org.apache.zookeeper.server.ZooKeeperServer;
 
 public abstract class ActiveStandbyElectorTestUtil {
+  
+  private static final Log LOG = LogFactory.getLog(
+      ActiveStandbyElectorTestUtil.class);
+  private static final long LOG_INTERVAL_MS = 500;
 
   public static void waitForActiveLockData(TestContext ctx,
       ZooKeeperServer zks, String parentDir, byte[] activeData)
       throws Exception {
+    long st = System.currentTimeMillis();
+    long lastPrint = st;
     while (true) {
       if (ctx != null) {
         ctx.checkException();
@@ -42,10 +51,18 @@
             Arrays.equals(activeData, data)) {
           return;
         }
+        if (System.currentTimeMillis() > lastPrint + LOG_INTERVAL_MS) {
+          LOG.info("Cur data: " + StringUtils.byteToHexString(data));
+          lastPrint = System.currentTimeMillis();
+        }
       } catch (NoNodeException nne) {
         if (activeData == null) {
           return;
         }
+        if (System.currentTimeMillis() > lastPrint + LOG_INTERVAL_MS) {
+          LOG.info("Cur data: no node");
+          lastPrint = System.currentTimeMillis();
+        }
       }
       Thread.sleep(50);
     }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
new file mode 100644
index 0000000..858162a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
@@ -0,0 +1,452 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ha;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.net.Socket;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.zookeeper.PortAssignment;
+import org.apache.zookeeper.TestableZooKeeper;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.Watcher.Event.KeeperState;
+import org.apache.zookeeper.ZKTestCase;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.server.ServerCnxnFactory;
+import org.apache.zookeeper.server.ServerCnxnFactoryAccessor;
+import org.apache.zookeeper.server.ZKDatabase;
+import org.apache.zookeeper.server.ZooKeeperServer;
+import org.apache.zookeeper.server.persistence.FileTxnLog;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Copy-paste of ClientBase from ZooKeeper, but without any of the
+ * JMXEnv verification. There seems to be a bug (ZOOKEEPER-1438)
+ * that causes spurious failures in the JMXEnv verification when
+ * we run these tests with the upstream ClientBase.
+ */
+public abstract class ClientBaseWithFixes extends ZKTestCase {
+    protected static final Logger LOG = LoggerFactory.getLogger(ClientBaseWithFixes.class);
+
+    public static int CONNECTION_TIMEOUT = 30000;
+    static final File BASETEST =
+        new File(System.getProperty("build.test.dir", "build"));
+
+    protected String hostPort = "127.0.0.1:" + PortAssignment.unique();
+    protected int maxCnxns = 0;
+    protected ServerCnxnFactory serverFactory = null;
+    protected File tmpDir = null;
+    
+    long initialFdCount;
+    
+    public ClientBaseWithFixes() {
+        super();
+    }
+
+    /**
+     * In general don't use this. Only use in the special case that you
+     * want to ignore results (for whatever reason) in your test. Don't
+     * use empty watchers in real code!
+     *
+     */
+    protected class NullWatcher implements Watcher {
+        public void process(WatchedEvent event) { /* nada */ }
+    }
+
+    protected static class CountdownWatcher implements Watcher {
+        // XXX this doesn't need to be volatile! (Should probably be final)
+        volatile CountDownLatch clientConnected;
+        volatile boolean connected;
+
+        public CountdownWatcher() {
+            reset();
+        }
+        synchronized public void reset() {
+            clientConnected = new CountDownLatch(1);
+            connected = false;
+        }
+        synchronized public void process(WatchedEvent event) {
+            if (event.getState() == KeeperState.SyncConnected ||
+                event.getState() == KeeperState.ConnectedReadOnly) {
+                connected = true;
+                notifyAll();
+                clientConnected.countDown();
+            } else {
+                connected = false;
+                notifyAll();
+            }
+        }
+        synchronized boolean isConnected() {
+            return connected;
+        }
+        synchronized void waitForConnected(long timeout) throws InterruptedException, TimeoutException {
+            long expire = System.currentTimeMillis() + timeout;
+            long left = timeout;
+            while(!connected && left > 0) {
+                wait(left);
+                left = expire - System.currentTimeMillis();
+            }
+            if (!connected) {
+                throw new TimeoutException("Did not connect");
+
+            }
+        }
+        synchronized void waitForDisconnected(long timeout) throws InterruptedException, TimeoutException {
+            long expire = System.currentTimeMillis() + timeout;
+            long left = timeout;
+            while(connected && left > 0) {
+                wait(left);
+                left = expire - System.currentTimeMillis();
+            }
+            if (connected) {
+                throw new TimeoutException("Did not disconnect");
+
+            }
+        }
+    }
+
+    protected TestableZooKeeper createClient()
+        throws IOException, InterruptedException
+    {
+        return createClient(hostPort);
+    }
+
+    protected TestableZooKeeper createClient(String hp)
+        throws IOException, InterruptedException
+    {
+        CountdownWatcher watcher = new CountdownWatcher();
+        return createClient(watcher, hp);
+    }
+
+    private LinkedList<ZooKeeper> allClients;
+    private boolean allClientsSetup = false;
+
+    protected TestableZooKeeper createClient(CountdownWatcher watcher, String hp)
+        throws IOException, InterruptedException
+    {
+        return createClient(watcher, hp, CONNECTION_TIMEOUT);
+    }
+
+    protected TestableZooKeeper createClient(CountdownWatcher watcher,
+            String hp, int timeout)
+        throws IOException, InterruptedException
+    {
+        watcher.reset();
+        TestableZooKeeper zk = new TestableZooKeeper(hp, timeout, watcher);
+        if (!watcher.clientConnected.await(timeout, TimeUnit.MILLISECONDS))
+        {
+            Assert.fail("Unable to connect to server");
+        }
+        synchronized(this) {
+            if (!allClientsSetup) {
+                LOG.error("allClients never setup");
+                Assert.fail("allClients never setup");
+            }
+            if (allClients != null) {
+                allClients.add(zk);
+            } else {
+                // test done - close the zk, not needed
+                zk.close();
+            }
+        }
+
+
+        return zk;
+    }
+
+    public static class HostPort {
+        String host;
+        int port;
+        public HostPort(String host, int port) {
+            this.host = host;
+            this.port = port;
+        }
+    }
+    public static List<HostPort> parseHostPortList(String hplist) {
+        ArrayList<HostPort> alist = new ArrayList<HostPort>();
+        for (String hp: hplist.split(",")) {
+            int idx = hp.lastIndexOf(':');
+            String host = hp.substring(0, idx);
+            int port;
+            try {
+                port = Integer.parseInt(hp.substring(idx + 1));
+            } catch(RuntimeException e) {
+                throw new RuntimeException("Problem parsing " + hp + ": " + e.toString());
+            }
+            alist.add(new HostPort(host,port));
+        }
+        return alist;
+    }
+
+    /**
+     * Send the 4letterword
+     * @param host the destination host
+     * @param port the destination port
+     * @param cmd the 4letterword
+     * @return
+     * @throws IOException
+     */
+    public static String send4LetterWord(String host, int port, String cmd)
+        throws IOException
+    {
+        LOG.info("connecting to " + host + " " + port);
+        Socket sock = new Socket(host, port);
+        BufferedReader reader = null;
+        try {
+            OutputStream outstream = sock.getOutputStream();
+            outstream.write(cmd.getBytes());
+            outstream.flush();
+            // this replicates NC - close the output stream before reading
+            sock.shutdownOutput();
+
+            reader =
+                new BufferedReader(
+                        new InputStreamReader(sock.getInputStream()));
+            StringBuilder sb = new StringBuilder();
+            String line;
+            while((line = reader.readLine()) != null) {
+                sb.append(line + "\n");
+            }
+            return sb.toString();
+        } finally {
+            sock.close();
+            if (reader != null) {
+                reader.close();
+            }
+        }
+    }
+
+    public static boolean waitForServerUp(String hp, long timeout) {
+        long start = System.currentTimeMillis();
+        while (true) {
+            try {
+                // if there are multiple hostports, just take the first one
+                HostPort hpobj = parseHostPortList(hp).get(0);
+                String result = send4LetterWord(hpobj.host, hpobj.port, "stat");
+                if (result.startsWith("Zookeeper version:") &&
+                        !result.contains("READ-ONLY")) {
+                    return true;
+                }
+            } catch (IOException e) {
+                // ignore as this is expected
+                LOG.info("server " + hp + " not up " + e);
+            }
+
+            if (System.currentTimeMillis() > start + timeout) {
+                break;
+            }
+            try {
+                Thread.sleep(250);
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+        return false;
+    }
+    public static boolean waitForServerDown(String hp, long timeout) {
+        long start = System.currentTimeMillis();
+        while (true) {
+            try {
+                HostPort hpobj = parseHostPortList(hp).get(0);
+                send4LetterWord(hpobj.host, hpobj.port, "stat");
+            } catch (IOException e) {
+                return true;
+            }
+
+            if (System.currentTimeMillis() > start + timeout) {
+                break;
+            }
+            try {
+                Thread.sleep(250);
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+        return false;
+    }
+
+    public static File createTmpDir() throws IOException {
+        return createTmpDir(BASETEST);
+    }
+    static File createTmpDir(File parentDir) throws IOException {
+        File tmpFile = File.createTempFile("test", ".junit", parentDir);
+        // don't delete tmpFile - this ensures we don't attempt to create
+        // a tmpDir with a duplicate name
+        File tmpDir = new File(tmpFile + ".dir");
+        Assert.assertFalse(tmpDir.exists()); // never true if tmpfile does its job
+        Assert.assertTrue(tmpDir.mkdirs());
+
+        return tmpDir;
+    }
+    private static int getPort(String hostPort) {
+        String[] split = hostPort.split(":");
+        String portstr = split[split.length-1];
+        String[] pc = portstr.split("/");
+        if (pc.length > 1) {
+            portstr = pc[0];
+        }
+        return Integer.parseInt(portstr);
+    }
+
+    static ServerCnxnFactory createNewServerInstance(File dataDir,
+            ServerCnxnFactory factory, String hostPort, int maxCnxns)
+        throws IOException, InterruptedException
+    {
+        ZooKeeperServer zks = new ZooKeeperServer(dataDir, dataDir, 3000);
+        final int PORT = getPort(hostPort);
+        if (factory == null) {
+            factory = ServerCnxnFactory.createFactory(PORT, maxCnxns);
+        }
+        factory.startup(zks);
+        Assert.assertTrue("waiting for server up",
+                   ClientBaseWithFixes.waitForServerUp("127.0.0.1:" + PORT,
+                                              CONNECTION_TIMEOUT));
+
+        return factory;
+    }
+
+    static void shutdownServerInstance(ServerCnxnFactory factory,
+            String hostPort)
+    {
+        if (factory != null) {
+            ZKDatabase zkDb;
+            {
+                ZooKeeperServer zs = getServer(factory);
+        
+                zkDb = zs.getZKDatabase();
+            }
+            factory.shutdown();
+            try {
+                zkDb.close();
+            } catch (IOException ie) {
+                LOG.warn("Error closing logs ", ie);
+            }
+            final int PORT = getPort(hostPort);
+
+            Assert.assertTrue("waiting for server down",
+                       ClientBaseWithFixes.waitForServerDown("127.0.0.1:" + PORT,
+                                                    CONNECTION_TIMEOUT));
+        }
+    }
+
+    /**
+     * Test specific setup
+     */
+    public static void setupTestEnv() {
+        // during the tests we run with 100K prealloc in the logs.
+        // on windows systems prealloc of 64M was seen to take ~15seconds
+        // resulting in test Assert.failure (client timeout on first session).
+        // set env and directly in order to handle static init/gc issues
+        System.setProperty("zookeeper.preAllocSize", "100");
+        FileTxnLog.setPreallocSize(100 * 1024);
+    }
+
+    protected void setUpAll() throws Exception {
+        allClients = new LinkedList<ZooKeeper>();
+        allClientsSetup = true;
+    }
+
+    @Before
+    public void setUp() throws Exception {
+        BASETEST.mkdirs();
+
+        setupTestEnv();
+
+        setUpAll();
+
+        tmpDir = createTmpDir(BASETEST);
+
+        startServer();
+
+        LOG.info("Client test setup finished");
+    }
+
+    protected void startServer() throws Exception {
+        LOG.info("STARTING server");
+        serverFactory = createNewServerInstance(tmpDir, serverFactory, hostPort, maxCnxns);
+    }
+
+    protected void stopServer() throws Exception {
+        LOG.info("STOPPING server");
+        shutdownServerInstance(serverFactory, hostPort);
+        serverFactory = null;
+    }
+
+
+    protected static ZooKeeperServer getServer(ServerCnxnFactory fac) {
+        ZooKeeperServer zs = ServerCnxnFactoryAccessor.getZkServer(fac);
+
+        return zs;
+    }
+
+    protected void tearDownAll() throws Exception {
+        synchronized (this) {
+            if (allClients != null) for (ZooKeeper zk : allClients) {
+                try {
+                    if (zk != null)
+                        zk.close();
+                } catch (InterruptedException e) {
+                    LOG.warn("ignoring interrupt", e);
+                }
+            }
+            allClients = null;
+        }
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        LOG.info("tearDown starting");
+
+        tearDownAll();
+
+        stopServer();
+
+        if (tmpDir != null) {
+            Assert.assertTrue("delete " + tmpDir.toString(), recursiveDelete(tmpDir));
+        }
+
+        // This has to be set to null when the same instance of this class is reused between test cases
+        serverFactory = null;
+    }
+
+    public static boolean recursiveDelete(File d) {
+        if (d.isDirectory()) {
+            File children[] = d.listFiles();
+            for (File f : children) {
+                Assert.assertTrue("delete " + f.toString(), recursiveDelete(f));
+            }
+        }
+        return d.delete();
+    }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
index a8cd3d6..c38bc53 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
@@ -22,6 +22,8 @@
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.security.AccessControlException;
@@ -34,13 +36,19 @@
  * a mock implementation.
  */
 class DummyHAService extends HAServiceTarget {
+  public static final Log LOG = LogFactory.getLog(DummyHAService.class);
+  private static final String DUMMY_FENCE_KEY = "dummy.fence.key";
   volatile HAServiceState state;
   HAServiceProtocol proxy;
+  ZKFCProtocol zkfcProxy = null;
   NodeFencer fencer;
   InetSocketAddress address;
   boolean isHealthy = true;
   boolean actUnreachable = false;
-  boolean failToBecomeActive;
+  boolean failToBecomeActive, failToBecomeStandby, failToFence;
+  
+  DummySharedResource sharedResource;
+  public int fenceCount = 0;
   
   static ArrayList<DummyHAService> instances = Lists.newArrayList();
   int index;
@@ -48,7 +56,14 @@
   DummyHAService(HAServiceState state, InetSocketAddress address) {
     this.state = state;
     this.proxy = makeMock();
-    this.fencer = Mockito.mock(NodeFencer.class);
+    try {
+      Configuration conf = new Configuration();
+      conf.set(DUMMY_FENCE_KEY, DummyFencer.class.getName()); 
+      this.fencer = Mockito.spy(
+          NodeFencer.create(conf, DUMMY_FENCE_KEY));
+    } catch (BadFencingConfigurationException e) {
+      throw new RuntimeException(e);
+    }
     this.address = address;
     synchronized (instances) {
       instances.add(this);
@@ -56,6 +71,10 @@
     }
   }
   
+  public void setSharedResource(DummySharedResource rsrc) {
+    this.sharedResource = rsrc;
+  }
+  
   private HAServiceProtocol makeMock() {
     return Mockito.spy(new MockHAProtocolImpl());
   }
@@ -66,12 +85,24 @@
   }
 
   @Override
+  public InetSocketAddress getZKFCAddress() {
+    return null;
+  }
+
+  @Override
   public HAServiceProtocol getProxy(Configuration conf, int timeout)
       throws IOException {
     return proxy;
   }
   
   @Override
+  public ZKFCProtocol getZKFCProxy(Configuration conf, int timeout)
+      throws IOException {
+    assert zkfcProxy != null;
+    return zkfcProxy;
+  }
+  
+  @Override
   public NodeFencer getFencer() {
     return fencer;
   }
@@ -81,6 +112,11 @@
   }
   
   @Override
+  public boolean isAutoFailoverEnabled() {
+    return true;
+  }
+
+  @Override
   public String toString() {
     return "DummyHAService #" + index;
   }
@@ -101,20 +137,28 @@
     }
     
     @Override
-    public void transitionToActive() throws ServiceFailedException,
+    public void transitionToActive(StateChangeRequestInfo req) throws ServiceFailedException,
         AccessControlException, IOException {
       checkUnreachable();
       if (failToBecomeActive) {
         throw new ServiceFailedException("injected failure");
       }
-    
+      if (sharedResource != null) {
+        sharedResource.take(DummyHAService.this);
+      }
       state = HAServiceState.ACTIVE;
     }
     
     @Override
-    public void transitionToStandby() throws ServiceFailedException,
+    public void transitionToStandby(StateChangeRequestInfo req) throws ServiceFailedException,
         AccessControlException, IOException {
       checkUnreachable();
+      if (failToBecomeStandby) {
+        throw new ServiceFailedException("injected failure");
+      }
+      if (sharedResource != null) {
+        sharedResource.release(DummyHAService.this);
+      }
       state = HAServiceState.STANDBY;
     }
     
@@ -138,4 +182,26 @@
     public void close() throws IOException {
     }
   }
+  
+  public static class DummyFencer implements FenceMethod {
+    public void checkArgs(String args) throws BadFencingConfigurationException {
+    }
+
+    @Override
+    public boolean tryFence(HAServiceTarget target, String args)
+        throws BadFencingConfigurationException {
+      LOG.info("tryFence(" + target + ")");
+      DummyHAService svc = (DummyHAService)target;
+      synchronized (svc) {
+        svc.fenceCount++;
+      }
+      if (svc.failToFence) {
+        LOG.info("Injected failure to fence");
+        return false;
+      }
+      svc.sharedResource.release(svc);
+      return true;
+    }
+  }
+
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummySharedResource.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummySharedResource.java
new file mode 100644
index 0000000..a7cf41d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummySharedResource.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import org.junit.Assert;
+
+/**
+ * A fake shared resource, for use in automatic failover testing.
+ * This simulates a real shared resource like a shared edit log.
+ * When the {@link DummyHAService} instances change state or get
+ * fenced, they notify the shared resource, which asserts that
+ * we never have two HA services who think they're holding the
+ * resource at the same time.
+ */
+public class DummySharedResource {
+  private DummyHAService holder = null;
+  private int violations = 0;
+  
+  public synchronized void take(DummyHAService newHolder) {
+    if (holder == null || holder == newHolder) {
+      holder = newHolder;
+    } else {
+      violations++;
+      throw new IllegalStateException("already held by: " + holder);
+    }
+  }
+  
+  public synchronized void release(DummyHAService oldHolder) {
+    if (holder == oldHolder) {
+      holder = null;
+    }
+  }
+  
+  public synchronized void assertNoViolations() {
+    Assert.assertEquals(0, violations);
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
new file mode 100644
index 0000000..1db7924
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
@@ -0,0 +1,319 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HealthMonitor.State;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.apache.zookeeper.data.Stat;
+import org.apache.zookeeper.server.ZooKeeperServer;
+
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Ints;
+
+/**
+ * Harness for starting two dummy ZK FailoverControllers, associated with
+ * DummyHAServices. This harness starts two such ZKFCs, designated by
+ * indexes 0 and 1, and provides utilities for building tests around them.
+ */
+public class MiniZKFCCluster {
+  private final TestContext ctx;
+  private final ZooKeeperServer zks;
+
+  private DummyHAService svcs[];
+  private DummyZKFCThread thrs[];
+  private Configuration conf;
+  
+  private DummySharedResource sharedResource = new DummySharedResource();
+  
+  private static final Log LOG = LogFactory.getLog(MiniZKFCCluster.class);
+  
+  public MiniZKFCCluster(Configuration conf, ZooKeeperServer zks) {
+    this.conf = conf;
+    // Fast check interval so tests run faster
+    conf.setInt(CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 50);
+    conf.setInt(CommonConfigurationKeys.HA_HM_CONNECT_RETRY_INTERVAL_KEY, 50);
+    conf.setInt(CommonConfigurationKeys.HA_HM_SLEEP_AFTER_DISCONNECT_KEY, 50);
+    svcs = new DummyHAService[2];
+    svcs[0] = new DummyHAService(HAServiceState.INITIALIZING,
+        new InetSocketAddress("svc1", 1234));
+    svcs[0].setSharedResource(sharedResource);
+    svcs[1] = new DummyHAService(HAServiceState.INITIALIZING,
+        new InetSocketAddress("svc2", 1234));
+    svcs[1].setSharedResource(sharedResource);
+    
+    this.ctx = new TestContext();
+    this.zks = zks;
+  }
+  
+  /**
+   * Set up two services and their failover controllers. svc1 is started
+   * first, so that it enters ACTIVE state, and then svc2 is started,
+   * which enters STANDBY
+   */
+  public void start() throws Exception {
+    // Format the base dir, should succeed
+    thrs = new DummyZKFCThread[2];
+    thrs[0] = new DummyZKFCThread(ctx, svcs[0]);
+    assertEquals(0, thrs[0].zkfc.run(new String[]{"-formatZK"}));
+    ctx.addThread(thrs[0]);
+    thrs[0].start();
+    
+    LOG.info("Waiting for svc0 to enter active state");
+    waitForHAState(0, HAServiceState.ACTIVE);
+    
+    LOG.info("Adding svc1");
+    thrs[1] = new DummyZKFCThread(ctx, svcs[1]);
+    thrs[1].start();
+    waitForHAState(1, HAServiceState.STANDBY);
+  }
+  
+  /**
+   * Stop the services.
+   * @throws Exception if either of the services has encountered a fatal error
+   */
+  public void stop() throws Exception {
+    for (DummyZKFCThread thr : thrs) {
+      if (thr != null) {
+        thr.interrupt();
+      }
+    }
+    if (ctx != null) {
+      ctx.stop();
+    }
+    sharedResource.assertNoViolations();
+  }
+
+  /**
+   * @return the TestContext implementation used internally. This allows more
+   * threads to be added to the context, etc.
+   */
+  public TestContext getTestContext() {
+    return ctx;
+  }
+  
+  public DummyHAService getService(int i) {
+    return svcs[i];
+  }
+
+  public ActiveStandbyElector getElector(int i) {
+    return thrs[i].zkfc.getElectorForTests();
+  }
+
+  public DummyZKFC getZkfc(int i) {
+    return thrs[i].zkfc;
+  }
+  
+  public void setHealthy(int idx, boolean healthy) {
+    svcs[idx].isHealthy = healthy;
+  }
+
+  public void setFailToBecomeActive(int idx, boolean doFail) {
+    svcs[idx].failToBecomeActive = doFail;
+  }
+
+  public void setFailToBecomeStandby(int idx, boolean doFail) {
+    svcs[idx].failToBecomeStandby = doFail;
+  }
+  
+  public void setFailToFence(int idx, boolean doFail) {
+    svcs[idx].failToFence = doFail;
+  }
+  
+  public void setUnreachable(int idx, boolean unreachable) {
+    svcs[idx].actUnreachable = unreachable;
+  }
+
+  /**
+   * Wait for the given HA service to enter the given HA state.
+   */
+  public void waitForHAState(int idx, HAServiceState state)
+      throws Exception {
+    DummyHAService svc = getService(idx);
+    while (svc.state != state) {
+      ctx.checkException();
+      Thread.sleep(50);
+    }
+  }
+  
+  /**
+   * Wait for the ZKFC to be notified of a change in health state.
+   */
+  public void waitForHealthState(int idx, State state)
+      throws Exception {
+    ZKFCTestUtil.waitForHealthState(thrs[idx].zkfc, state, ctx);
+  }
+
+  /**
+   * Wait for the given elector to enter the given elector state.
+   * @param idx the service index (0 or 1)
+   * @param state the state to wait for
+   * @throws Exception if it times out, or an exception occurs on one
+   * of the ZKFC threads while waiting.
+   */
+  public void waitForElectorState(int idx,
+      ActiveStandbyElector.State state) throws Exception {
+    ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
+        getElector(idx), state);
+  }
+
+  
+
+  /**
+   * Expire the ZK session of the given service. This requires
+   * (and asserts) that the given service be the current active.
+   * @throws NoNodeException if no service holds the lock
+   */
+  public void expireActiveLockHolder(int idx)
+      throws NoNodeException {
+    Stat stat = new Stat();
+    byte[] data = zks.getZKDatabase().getData(
+        DummyZKFC.LOCK_ZNODE, stat, null);
+    
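+    // The lock znode's data is the active service's index, written by
+    // DummyZKFC.targetToData() as a 4-byte big-endian value.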
+    assertArrayEquals(Ints.toByteArray(svcs[idx].index), data);
+    long session = stat.getEphemeralOwner();
+    LOG.info("Expiring svc " + idx + "'s zookeeper session " + session);
+    zks.closeSession(session);
+  }
+  
+
+  /**
+   * Wait for the given HA service to become the active lock holder.
+   * If the passed index is null, waits for there to be no active
+   * lock holder.
+   */
+  public void waitForActiveLockHolder(Integer idx)
+      throws Exception {
+    DummyHAService svc = idx == null ? null : svcs[idx];
+    ActiveStandbyElectorTestUtil.waitForActiveLockData(ctx, zks,
+        DummyZKFC.SCOPED_PARENT_ZNODE,
+        (idx == null) ? null : Ints.toByteArray(svc.index));
+  }
+  
+
+  /**
+   * Expires the ZK session associated with service 'fromIdx', and waits
+   * until service 'toIdx' takes over.
+   * @throws Exception if the target service does not become active
+   */
+  public void expireAndVerifyFailover(int fromIdx, int toIdx)
+      throws Exception {
+    Preconditions.checkArgument(fromIdx != toIdx);
+    
+    getElector(fromIdx).preventSessionReestablishmentForTests();
+    try {
+      expireActiveLockHolder(fromIdx);
+      
+      waitForHAState(fromIdx, HAServiceState.STANDBY);
+      waitForHAState(toIdx, HAServiceState.ACTIVE);
+    } finally {
+      getElector(fromIdx).allowSessionReestablishmentForTests();
+    }
+  }
+
+  /**
+   * Test-thread which runs a ZK Failover Controller corresponding
+   * to a given dummy service.
+   */
+  private class DummyZKFCThread extends TestingThread {
+    private final DummyZKFC zkfc;
+
+    public DummyZKFCThread(TestContext ctx, DummyHAService svc) {
+      super(ctx);
+      this.zkfc = new DummyZKFC(conf, svc);
+    }
+
+    @Override
+    public void doWork() throws Exception {
+      try {
+        assertEquals(0, zkfc.run(new String[0]));
+      } catch (InterruptedException ie) {
+        // Interrupted by main thread, that's OK.
+      }
+    }
+  }
+  
+  static class DummyZKFC extends ZKFailoverController {
+    private static final String DUMMY_CLUSTER = "dummy-cluster";
+    public static final String SCOPED_PARENT_ZNODE =
+      ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT + "/" +
+      DUMMY_CLUSTER;
+    private static final String LOCK_ZNODE = 
+      SCOPED_PARENT_ZNODE + "/" + ActiveStandbyElector.LOCK_FILENAME;
+    private final DummyHAService localTarget;
+    
+    public DummyZKFC(Configuration conf, DummyHAService localTarget) {
+      super(conf, localTarget);
+      this.localTarget = localTarget;
+    }
+
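+    // The data stored under the lock znode is simply the service index,
+    // encoded to/from a 4-byte array with Guava's Ints helper.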
+    @Override
+    protected byte[] targetToData(HAServiceTarget target) {
+      return Ints.toByteArray(((DummyHAService)target).index);
+    }
+    
+    @Override
+    protected HAServiceTarget dataToTarget(byte[] data) {
+      int index = Ints.fromByteArray(data);
+      return DummyHAService.getInstance(index);
+    }
+
+    @Override
+    protected void loginAsFCUser() throws IOException {
+    }
+
+    @Override
+    protected String getScopeInsideParentNode() {
+      return DUMMY_CLUSTER;
+    }
+
+    @Override
+    protected void checkRpcAdminAccess() throws AccessControlException {
+    }
+
+    @Override
+    protected InetSocketAddress getRpcAddressToBindTo() {
+      return new InetSocketAddress(0);
+    }
+
+    @Override
+    protected void initRPC() throws IOException {
+      super.initRPC();
+      localTarget.zkfcProxy = this.getRpcServerForTests();
+    }
+
+    @Override
+    protected PolicyProvider getPolicyProvider() {
+      return null;
+    }
+  }
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
index b9786ae..2eba967 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ha;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.zookeeper.AsyncCallback;
@@ -40,6 +41,7 @@
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
+import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
 
 public class TestActiveStandbyElector {
 
@@ -51,9 +53,12 @@
   private ActiveStandbyElectorTester elector;
 
   class ActiveStandbyElectorTester extends ActiveStandbyElector {
+    private int sleptFor = 0;
+    
     ActiveStandbyElectorTester(String hostPort, int timeout, String parent,
         List<ACL> acl, ActiveStandbyElectorCallback app) throws IOException {
-      super(hostPort, timeout, parent, acl, app);
+      super(hostPort, timeout, parent, acl,
+          Collections.<ZKAuthInfo>emptyList(), app);
     }
 
     @Override
@@ -61,6 +66,14 @@
       ++count;
       return mockZK;
     }
+    
+    @Override
+    protected void sleepFor(int ms) {
+      // don't sleep in unit tests! Instead, just record the amount of
+      // time slept
+      LOG.info("Would have slept for " + ms + "ms");
+      sleptFor += ms;
+    }
   }
 
   private static final String ZK_PARENT_NAME = "/parent/node";
@@ -147,6 +160,68 @@
   }
   
   /**
+   * Verify that, when the callback fails to enter active state,
+   * the elector rejoins the election after sleeping for a short period.
+   */
+  @Test
+  public void testFailToBecomeActive() throws Exception {
+    mockNoPriorActive();
+    elector.joinElection(data);
+    Assert.assertEquals(0, elector.sleptFor);
+    
+    Mockito.doThrow(new ServiceFailedException("failed to become active"))
+        .when(mockApp).becomeActive();
+    elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK,
+        ZK_LOCK_NAME);
+    // Should have tried to become active
+    Mockito.verify(mockApp).becomeActive();
+    
+    // should re-join
+    Mockito.verify(mockZK, Mockito.times(2)).create(ZK_LOCK_NAME, data,
+        Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
+    Assert.assertEquals(2, count);
+    Assert.assertTrue(elector.sleptFor > 0);
+  }
+  
+  /**
+   * Verify that, when the callback fails to enter the active state after
+   * a ZK disconnect (i.e. from the StatCallback), the elector rejoins
+   * the election after sleeping for a short period.
+   */
+  @Test
+  public void testFailToBecomeActiveAfterZKDisconnect() throws Exception {
+    mockNoPriorActive();
+    elector.joinElection(data);
+    Assert.assertEquals(0, elector.sleptFor);
+
+    elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
+        ZK_LOCK_NAME);
+    Mockito.verify(mockZK, Mockito.times(2)).create(ZK_LOCK_NAME, data,
+        Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
+
+    elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK,
+        ZK_LOCK_NAME);
+    verifyExistCall(1);
+
+    Stat stat = new Stat();
+    stat.setEphemeralOwner(1L);
+    Mockito.when(mockZK.getSessionId()).thenReturn(1L);
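+    // With the stat's ephemeral owner equal to our own session id, the
+    // elector treats the existing lock node as its own and becomes active.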
+
+    // Fake failure to become active from within the stat callback
+    Mockito.doThrow(new ServiceFailedException("fail to become active"))
+        .when(mockApp).becomeActive();
+    elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
+    Mockito.verify(mockApp, Mockito.times(1)).becomeActive();
+    
+    // should re-join
+    Mockito.verify(mockZK, Mockito.times(3)).create(ZK_LOCK_NAME, data,
+        Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
+    Assert.assertEquals(2, count);
+    Assert.assertTrue(elector.sleptFor > 0);
+  }
+
+  
+  /**
    * Verify that, if there is a record of a prior active node, the
    * elector asks the application to fence it before becoming active.
    */
@@ -314,6 +389,7 @@
    */
   @Test
   public void testStatNodeRetry() {
+    elector.joinElection(data);
     elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
         (Stat) null);
     elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
@@ -334,6 +410,7 @@
    */
   @Test
   public void testStatNodeError() {
+    elector.joinElection(data);
     elector.processResult(Code.RUNTIMEINCONSISTENCY.intValue(), ZK_LOCK_NAME,
         mockZK, (Stat) null);
     Mockito.verify(mockApp, Mockito.times(0)).enterNeutralMode();
@@ -517,6 +594,8 @@
    */
   @Test
   public void testQuitElection() throws Exception {
+    elector.joinElection(data);
+    Mockito.verify(mockZK, Mockito.times(0)).close();
     elector.quitElection(true);
     Mockito.verify(mockZK, Mockito.times(1)).close();
     // no watches added
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
index 3a0fa5f..d51d5fa 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
@@ -21,15 +21,16 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import java.io.File;
+import java.util.Collections;
 import java.util.UUID;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
+import org.apache.hadoop.ha.ActiveStandbyElector.State;
+import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
 import org.apache.log4j.Level;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.server.ZooKeeperServer;
-import org.apache.zookeeper.test.ClientBase;
 import org.junit.Test;
 import org.mockito.AdditionalMatchers;
 import org.mockito.Mockito;
@@ -39,7 +40,7 @@
 /**
  * Test for {@link ActiveStandbyElector} using real zookeeper.
  */
-public class TestActiveStandbyElectorRealZK extends ClientBase {
+public class TestActiveStandbyElectorRealZK extends ClientBaseWithFixes {
   static final int NUM_ELECTORS = 2;
   
   static {
@@ -58,8 +59,6 @@
   
   @Override
   public void setUp() throws Exception {
-    // build.test.dir is used by zookeeper
-    new File(System.getProperty("build.test.dir", "build")).mkdirs();
     super.setUp();
     
     zkServer = getServer(serverFactory);
@@ -68,7 +67,8 @@
       cbs[i] =  Mockito.mock(ActiveStandbyElectorCallback.class);
       appDatas[i] = Ints.toByteArray(i);
       electors[i] = new ActiveStandbyElector(
-          hostPort, 5000, PARENT_DIR, Ids.OPEN_ACL_UNSAFE, cbs[i]);
+          hostPort, 5000, PARENT_DIR, Ids.OPEN_ACL_UNSAFE,
+          Collections.<ZKAuthInfo>emptyList(), cbs[i]);
     }
   }
   
@@ -196,4 +196,63 @@
 
     checkFatalsAndReset();
   }
+  
+  @Test(timeout=15000)
+  public void testHandleSessionExpirationOfStandby() throws Exception {
+    // Let elector 0 be active
+    electors[0].ensureParentZNode();
+    electors[0].joinElection(appDatas[0]);
+    ZooKeeperServer zks = getServer(serverFactory);
+    ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
+        zks, PARENT_DIR, appDatas[0]);
+    Mockito.verify(cbs[0], Mockito.timeout(1000)).becomeActive();
+    checkFatalsAndReset();
+    
+    // Let elector 1 be standby
+    electors[1].joinElection(appDatas[1]);
+    ActiveStandbyElectorTestUtil.waitForElectorState(null, electors[1],
+        State.STANDBY);
+    
+    LOG.info("========================== Expiring standby's session");
+    zks.closeSession(electors[1].getZKSessionIdForTests());
+
+    // Should enter neutral mode when disconnected
+    Mockito.verify(cbs[1], Mockito.timeout(1000)).enterNeutralMode();
+
+    // Should re-join the election and go back to STANDBY
+    ActiveStandbyElectorTestUtil.waitForElectorState(null, electors[1],
+        State.STANDBY);
+    checkFatalsAndReset();
+    
+    LOG.info("========================== Quitting election");
+    electors[1].quitElection(false);
+
+    // Double check that we don't accidentally re-join the election due to
+    // receiving the "expired" event: quit elector 0 as well, and ensure
+    // that elector 1 never becomes active.
+    electors[0].quitElection(false);
+
+    Thread.sleep(1000);
+    Mockito.verify(cbs[1], Mockito.never()).becomeActive();
+    ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
+        zks, PARENT_DIR, null);
+
+    checkFatalsAndReset();
+  }
+
+  @Test(timeout=15000)
+  public void testDontJoinElectionOnDisconnectAndReconnect() throws Exception {
+    electors[0].ensureParentZNode();
+
+    stopServer();
+    ActiveStandbyElectorTestUtil.waitForElectorState(
+        null, electors[0], State.NEUTRAL);
+    startServer();
+    waitForServerUp(hostPort, CONNECTION_TIMEOUT);
+    // Have to sleep to allow time for the clients to reconnect.
+    Thread.sleep(2000);
+    Mockito.verify(cbs[0], Mockito.never()).becomeActive();
+    Mockito.verify(cbs[1], Mockito.never()).becomeActive();
+    checkFatalsAndReset();
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java
index 7d30bdf..791aaad 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java
@@ -27,11 +27,12 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
 import org.apache.hadoop.ha.TestNodeFencer.AlwaysFailFencer;
 import static org.apache.hadoop.ha.TestNodeFencer.setupFencer;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.test.MockitoUtil;
 
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -118,7 +119,8 @@
   public void testFailoverToUnreadyService() throws Exception {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
-    Mockito.doReturn(STATE_NOT_READY).when(svc2.proxy).getServiceStatus();
+    Mockito.doReturn(STATE_NOT_READY).when(svc2.proxy)
+        .getServiceStatus();
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
     try {
@@ -162,7 +164,7 @@
   public void testFailoverFromFaultyServiceSucceeds() throws Exception {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc1.proxy).transitionToStandby();
+        .when(svc1.proxy).transitionToStandby(anyReqInfo());
 
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
@@ -185,7 +187,7 @@
   public void testFailoverFromFaultyServiceFencingFailure() throws Exception {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc1.proxy).transitionToStandby();
+        .when(svc1.proxy).transitionToStandby(anyReqInfo());
 
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     svc1.fencer = svc2.fencer = setupFencer(AlwaysFailFencer.class.getName());
@@ -284,7 +286,7 @@
     DummyHAService svc1 = spy(new DummyHAService(HAServiceState.ACTIVE, svc1Addr));
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc2.proxy).transitionToActive();
+        .when(svc2.proxy).transitionToActive(anyReqInfo());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
     try {
@@ -295,8 +297,8 @@
     }
 
     // svc1 went standby then back to active
-    verify(svc1.proxy).transitionToStandby();
-    verify(svc1.proxy).transitionToActive();
+    verify(svc1.proxy).transitionToStandby(anyReqInfo());
+    verify(svc1.proxy).transitionToActive(anyReqInfo());
     assertEquals(HAServiceState.ACTIVE, svc1.state);
     assertEquals(HAServiceState.STANDBY, svc2.state);
   }
@@ -306,7 +308,7 @@
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc2.proxy).transitionToActive();
+        .when(svc2.proxy).transitionToActive(anyReqInfo());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
     try {
@@ -327,7 +329,7 @@
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc2.proxy).transitionToActive();
+        .when(svc2.proxy).transitionToActive(anyReqInfo());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     AlwaysSucceedFencer.fenceCalled = 0;
 
@@ -346,12 +348,16 @@
     assertSame(svc2, AlwaysSucceedFencer.fencedSvc);
   }
 
+  private StateChangeRequestInfo anyReqInfo() {
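+    // Matches any StateChangeRequestInfo passed to the mocked HA proxy calls.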
+    return Mockito.<StateChangeRequestInfo>any();
+  }
+
   @Test
   public void testFailureToFenceOnFailbackFailsTheFailback() throws Exception {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     Mockito.doThrow(new IOException("Failed!"))
-        .when(svc2.proxy).transitionToActive();
+        .when(svc2.proxy).transitionToActive(anyReqInfo());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysFailFencer.class.getName());
     AlwaysFailFencer.fenceCalled = 0;
 
@@ -374,10 +380,10 @@
   public void testFailbackToFaultyServiceFails() throws Exception {
     DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc1.proxy).transitionToActive();
+        .when(svc1.proxy).transitionToActive(anyReqInfo());
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     Mockito.doThrow(new ServiceFailedException("Failed!"))
-        .when(svc2.proxy).transitionToActive();
+        .when(svc2.proxy).transitionToActive(anyReqInfo());
 
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
@@ -420,7 +426,8 @@
   
   private void doFailover(HAServiceTarget tgt1, HAServiceTarget tgt2,
       boolean forceFence, boolean forceActive) throws FailoverFailedException {
-    FailoverController fc = new FailoverController(conf);
+    FailoverController fc = new FailoverController(conf, 
+        RequestSource.REQUEST_BY_USER);
     fc.failover(tgt1, tgt2, forceFence, forceActive);
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAZKUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAZKUtil.java
new file mode 100644
index 0000000..7b4d63a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAZKUtil.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import static org.junit.Assert.*;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.ha.HAZKUtil.BadAclFormatException;
+import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
+import org.apache.zookeeper.ZooDefs.Perms;
+import org.apache.zookeeper.data.ACL;
+import org.junit.Test;
+
+import com.google.common.base.Charsets;
+import com.google.common.io.Files;
+
+public class TestHAZKUtil {
+  private static final String TEST_ROOT_DIR = System.getProperty(
+      "test.build.data", "/tmp") + "/TestHAZKUtil";
+  private static final File TEST_FILE = new File(TEST_ROOT_DIR,
+      "test-file");
+  
+  /** A path which is expected not to exist */
+  private static final String BOGUS_FILE = "/xxxx-this-does-not-exist";
+
+  @Test
+  public void testEmptyACL() {
+    List<ACL> result = HAZKUtil.parseACLs("");
+    assertTrue(result.isEmpty());
+  }
+  
+  @Test
+  public void testNullACL() {
+    List<ACL> result = HAZKUtil.parseACLs(null);
+    assertTrue(result.isEmpty());
+  }
+  
+  @Test
+  public void testInvalidACLs() {
+    badAcl("a:b",
+        "ACL 'a:b' not of expected form scheme:id:perm"); // not enough parts
+    badAcl("a",
+        "ACL 'a' not of expected form scheme:id:perm"); // not enough parts
+    badAcl("password:foo:rx",
+        "Invalid permission 'x' in permission string 'rx'");
+  }
+  
+  private static void badAcl(String acls, String expectedErr) {
+    try {
+      HAZKUtil.parseACLs(acls);
+      fail("Should have failed to parse '" + acls + "'");
+    } catch (BadAclFormatException e) {
+      assertEquals(expectedErr, e.getMessage());
+    }
+  }
+
+  @Test
+  public void testGoodACLs() {
+    List<ACL> result = HAZKUtil.parseACLs(
+        "sasl:hdfs/host1@MY.DOMAIN:cdrwa, sasl:hdfs/host2@MY.DOMAIN:ca");
+    ACL acl0 = result.get(0);
+    assertEquals(Perms.CREATE | Perms.DELETE | Perms.READ |
+        Perms.WRITE | Perms.ADMIN, acl0.getPerms());
+    assertEquals("sasl", acl0.getId().getScheme());
+    assertEquals("hdfs/host1@MY.DOMAIN", acl0.getId().getId());
+    
+    ACL acl1 = result.get(1);
+    assertEquals(Perms.CREATE | Perms.ADMIN, acl1.getPerms());
+    assertEquals("sasl", acl1.getId().getScheme());
+    assertEquals("hdfs/host2@MY.DOMAIN", acl1.getId().getId());
+  }
+  
+  @Test
+  public void testEmptyAuth() {
+    List<ZKAuthInfo> result = HAZKUtil.parseAuth("");
+    assertTrue(result.isEmpty());
+  }
+  
+  @Test
+  public void testNullAuth() {
+    List<ZKAuthInfo> result = HAZKUtil.parseAuth(null);
+    assertTrue(result.isEmpty());
+  }
+  
+  @Test
+  public void testGoodAuths() {
+    List<ZKAuthInfo> result = HAZKUtil.parseAuth(
+        "scheme:data,\n   scheme2:user:pass");
+    assertEquals(2, result.size());
+    ZKAuthInfo auth0 = result.get(0);
+    assertEquals("scheme", auth0.getScheme());
+    assertEquals("data", new String(auth0.getAuth()));
+    
+    ZKAuthInfo auth1 = result.get(1);
+    assertEquals("scheme2", auth1.getScheme());
+    assertEquals("user:pass", new String(auth1.getAuth()));
+  }
+  
+  @Test
+  public void testConfIndirection() throws IOException {
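+    // resolveConfIndirection passes plain values through unchanged; a value
+    // of the form "@/path" is resolved by reading that file's contents.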
+    assertNull(HAZKUtil.resolveConfIndirection(null));
+    assertEquals("x", HAZKUtil.resolveConfIndirection("x"));
+    
+    TEST_FILE.getParentFile().mkdirs();
+    Files.write("hello world", TEST_FILE, Charsets.UTF_8);
+    assertEquals("hello world", HAZKUtil.resolveConfIndirection(
+        "@" + TEST_FILE.getAbsolutePath()));
+    
+    try {
+      HAZKUtil.resolveConfIndirection("@" + BOGUS_FILE);
+      fail("Did not throw for non-existent file reference");
+    } catch (FileNotFoundException fnfe) {
+      assertTrue(fnfe.getMessage().startsWith(BOGUS_FILE));
+    }
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
index 93f46a5..e5480e2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
@@ -19,93 +19,58 @@
 
 import static org.junit.Assert.*;
 
-import java.io.File;
-import java.net.InetSocketAddress;
+import java.security.NoSuchAlgorithmException;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HealthMonitor.State;
-import org.apache.hadoop.test.MultithreadedTestUtil;
-import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
-import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
+import org.apache.hadoop.ha.MiniZKFCCluster.DummyZKFC;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
-import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
-import org.apache.zookeeper.server.ZooKeeperServer;
-import org.apache.zookeeper.test.ClientBase;
+import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import com.google.common.primitives.Ints;
-
-public class TestZKFailoverController extends ClientBase {
+public class TestZKFailoverController extends ClientBaseWithFixes {
   private Configuration conf;
-  private DummyHAService svc1;
-  private DummyHAService svc2;
-  private TestContext ctx;
-  private DummyZKFCThread thr1, thr2;
+  private MiniZKFCCluster cluster;
+  
+  // Set up ZK digest-based credentials for the purposes of the tests,
+  // to make sure all of our functionality works with auth and ACLs
+  // present.
+  private static final String DIGEST_USER_PASS="test-user:test-password";
+  private static final String TEST_AUTH_GOOD =
+    "digest:" + DIGEST_USER_PASS;
+  private static final String DIGEST_USER_HASH;
+  static {
+    try {
+      DIGEST_USER_HASH = DigestAuthenticationProvider.generateDigest(
+          DIGEST_USER_PASS);
+    } catch (NoSuchAlgorithmException e) {
+      throw new RuntimeException(e);
+    }
+  }
+  private static final String TEST_ACL =
+    "digest:" + DIGEST_USER_HASH + ":rwcda";
   
   static {
     ((Log4JLogger)ActiveStandbyElector.LOG).getLogger().setLevel(Level.ALL);
   }
   
-  @Override
-  public void setUp() throws Exception {
-    // build.test.dir is used by zookeeper
-    new File(System.getProperty("build.test.dir", "build")).mkdirs();
-    super.setUp();
-  }
-  
   @Before
   public void setupConfAndServices() {
     conf = new Configuration();
-    conf.set(ZKFailoverController.ZK_QUORUM_KEY, hostPort);
-    // Fast check interval so tests run faster
-    conf.setInt(CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 50);
-    conf.setInt(CommonConfigurationKeys.HA_HM_CONNECT_RETRY_INTERVAL_KEY, 50);
-    conf.setInt(CommonConfigurationKeys.HA_HM_SLEEP_AFTER_DISCONNECT_KEY, 50);
-    svc1 = new DummyHAService(HAServiceState.INITIALIZING,
-        new InetSocketAddress("svc1", 1234));
-    svc2 = new DummyHAService(HAServiceState.INITIALIZING,
-        new InetSocketAddress("svc2", 1234));
-  }
-  
-  /**
-   * Set up two services and their failover controllers. svc1 is started
-   * first, so that it enters ACTIVE state, and then svc2 is started,
-   * which enters STANDBY
-   */
-  private void setupFCs() throws Exception {
-    // Format the base dir, should succeed
-    assertEquals(0, runFC(svc1, "-formatZK"));
+    conf.set(ZKFailoverController.ZK_ACL_KEY, TEST_ACL);
+    conf.set(ZKFailoverController.ZK_AUTH_KEY, TEST_AUTH_GOOD);
 
-    ctx = new MultithreadedTestUtil.TestContext();
-    thr1 = new DummyZKFCThread(ctx, svc1);
-    ctx.addThread(thr1);
-    thr1.start();
-    
-    LOG.info("Waiting for svc1 to enter active state");
-    waitForHAState(svc1, HAServiceState.ACTIVE);
-    
-    LOG.info("Adding svc2");
-    thr2 = new DummyZKFCThread(ctx, svc2);
-    thr2.start();
-    waitForHAState(svc2, HAServiceState.STANDBY);
-  }
-  
-  private void stopFCs() throws Exception {
-    if (thr1 != null) {
-      thr1.interrupt();
-    }
-    if (thr2 != null) {
-      thr2.interrupt();
-    }
-    if (ctx != null) {
-      ctx.stop();
-    }
+    conf.set(ZKFailoverController.ZK_QUORUM_KEY, hostPort);
+    this.cluster = new MiniZKFCCluster(conf, getServer(serverFactory));
   }
 
   /**
@@ -114,20 +79,104 @@
    */
   @Test(timeout=15000)
   public void testFormatZK() throws Exception {
+    DummyHAService svc = cluster.getService(1);
     // Run without formatting the base dir,
     // should barf
     assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
-        runFC(svc1));
+        runFC(svc));
 
     // Format the base dir, should succeed
-    assertEquals(0, runFC(svc1, "-formatZK"));
+    assertEquals(0, runFC(svc, "-formatZK"));
 
     // Should fail to format if already formatted
     assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,
-        runFC(svc1, "-formatZK", "-nonInteractive"));
+        runFC(svc, "-formatZK", "-nonInteractive"));
   
     // Unless '-force' is on
-    assertEquals(0, runFC(svc1, "-formatZK", "-force"));
+    assertEquals(0, runFC(svc, "-formatZK", "-force"));
+  }
+  
+  /**
+   * Test that if ZooKeeper is not running, the correct error
+   * code is returned.
+   */
+  @Test(timeout=15000)
+  public void testNoZK() throws Exception {
+    stopServer();
+    DummyHAService svc = cluster.getService(1);
+    assertEquals(ZKFailoverController.ERR_CODE_NO_ZK,
+        runFC(svc));
+  }
+  
+  @Test
+  public void testFormatOneClusterLeavesOtherClustersAlone() throws Exception {
+    DummyHAService svc = cluster.getService(1);
+
+    DummyZKFC zkfcInOtherCluster = new DummyZKFC(conf, cluster.getService(1)) {
+      @Override
+      protected String getScopeInsideParentNode() {
+        return "other-scope";
+      }
+    };
+    
+    // Run without formatting the base dir,
+    // should barf
+    assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
+        runFC(svc));
+
+    // Format the base dir, should succeed
+    assertEquals(0, runFC(svc, "-formatZK"));
+    
+    // Run the other cluster without formatting, should barf because
+    // it uses a different parent znode
+    assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
+        zkfcInOtherCluster.run(new String[]{}));
+    
+    // Should succeed in formatting the second cluster
+    assertEquals(0, zkfcInOtherCluster.run(new String[]{"-formatZK"}));
+
+    // But should not have deleted the original base node from the first
+    // cluster
+    assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,
+        runFC(svc, "-formatZK", "-nonInteractive"));
+  }
+  
+  /**
+   * Test that automatic failover won't run against a target that hasn't
+   * explicitly enabled the feature.
+   */
+  @Test(timeout=10000)
+  public void testWontRunWhenAutoFailoverDisabled() throws Exception {
+    DummyHAService svc = cluster.getService(1);
+    svc = Mockito.spy(svc);
+    Mockito.doReturn(false).when(svc).isAutoFailoverEnabled();
+    
+    assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,
+        runFC(svc, "-formatZK"));
+    assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,
+        runFC(svc));
+  }
+  
+  /**
+   * Test that, if ACLs are specified in the configuration, the ZKFC
+   * sets them when formatting the parent node.
+   */
+  @Test(timeout=15000)
+  public void testFormatSetsAcls() throws Exception {
+    // Format the base dir, should succeed
+    DummyHAService svc = cluster.getService(1);
+    assertEquals(0, runFC(svc, "-formatZK"));
+
+    ZooKeeper otherClient = createClient();
+    try {
+      // client without auth should not be able to read it
+      Stat stat = new Stat();
+      otherClient.getData(ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT,
+          false, stat);
+      fail("Was able to read data without authenticating!");
+    } catch (KeeperException.NoAuthException nae) {
+      // expected
+    }
   }
   
   /**
@@ -136,14 +185,14 @@
    */
   @Test(timeout=15000)
   public void testFencingMustBeConfigured() throws Exception {
-    svc1 = Mockito.spy(svc1);
+    DummyHAService svc = Mockito.spy(cluster.getService(0));
     Mockito.doThrow(new BadFencingConfigurationException("no fencing"))
-        .when(svc1).checkFencingConfigured();
+        .when(svc).checkFencingConfigured();
     // Format the base dir, should succeed
-    assertEquals(0, runFC(svc1, "-formatZK"));
+    assertEquals(0, runFC(svc, "-formatZK"));
     // Try to run the actual FC, should fail without a fencer
     assertEquals(ZKFailoverController.ERR_CODE_NO_FENCER,
-        runFC(svc1));
+        runFC(svc));
   }
   
   /**
@@ -155,66 +204,50 @@
   @Test(timeout=15000)
   public void testAutoFailoverOnBadHealth() throws Exception {
     try {
-      setupFCs();
+      cluster.start();
+      DummyHAService svc1 = cluster.getService(1);
       
-      LOG.info("Faking svc1 unhealthy, should failover to svc2");
-      svc1.isHealthy = false;
-      LOG.info("Waiting for svc1 to enter standby state");
-      waitForHAState(svc1, HAServiceState.STANDBY);
-      waitForHAState(svc2, HAServiceState.ACTIVE);
+      LOG.info("Faking svc0 unhealthy, should failover to svc1");
+      cluster.setHealthy(0, false);
+      
+      LOG.info("Waiting for svc0 to enter standby state");
+      cluster.waitForHAState(0, HAServiceState.STANDBY);
+      cluster.waitForHAState(1, HAServiceState.ACTIVE);
   
-      LOG.info("Allowing svc1 to be healthy again, making svc2 unreachable " +
+      LOG.info("Allowing svc0 to be healthy again, making svc1 unreachable " +
           "and fail to gracefully go to standby");
-      svc1.isHealthy = true;
-      svc2.actUnreachable = true;
-      
-      // Allow fencing to succeed
-      Mockito.doReturn(true).when(svc2.fencer).fence(Mockito.same(svc2));
-      // Should fail back to svc1 at this point
-      waitForHAState(svc1, HAServiceState.ACTIVE);
-      // and fence svc2
-      Mockito.verify(svc2.fencer).fence(Mockito.same(svc2));
+      cluster.setUnreachable(1, true);
+      cluster.setHealthy(0, true);
+ 
+      // Should fail back to svc0 at this point
+      cluster.waitForHAState(0, HAServiceState.ACTIVE);
+      // and fence svc1
+      Mockito.verify(svc1.fencer).fence(Mockito.same(svc1));
     } finally {
-      stopFCs();
+      cluster.stop();
     }
   }
   
   @Test(timeout=15000)
   public void testAutoFailoverOnLostZKSession() throws Exception {
     try {
-      setupFCs();
+      cluster.start();
 
-      // Expire svc1, it should fail over to svc2
-      expireAndVerifyFailover(thr1, thr2);
+      // Expire svc0, it should fail over to svc1
+      cluster.expireAndVerifyFailover(0, 1);
       
-      // Expire svc2, it should fail back to svc1
-      expireAndVerifyFailover(thr2, thr1);
+      // Expire svc1, it should fail back to svc0
+      cluster.expireAndVerifyFailover(1, 0);
       
       LOG.info("======= Running test cases second time to test " +
           "re-establishment =========");
-      // Expire svc1, it should fail over to svc2
-      expireAndVerifyFailover(thr1, thr2);
+      // Expire svc0, it should fail over to svc1
+      cluster.expireAndVerifyFailover(0, 1);
       
-      // Expire svc2, it should fail back to svc1
-      expireAndVerifyFailover(thr2, thr1);
+      // Expire svc1, it should fail back to svc0
+      cluster.expireAndVerifyFailover(1, 0);
     } finally {
-      stopFCs();
-    }
-  }
-  
-  private void expireAndVerifyFailover(DummyZKFCThread fromThr,
-      DummyZKFCThread toThr) throws Exception {
-    DummyHAService fromSvc = fromThr.zkfc.localTarget;
-    DummyHAService toSvc = toThr.zkfc.localTarget;
-    
-    fromThr.zkfc.getElectorForTests().preventSessionReestablishmentForTests();
-    try {
-      expireActiveLockHolder(fromSvc);
-      
-      waitForHAState(fromSvc, HAServiceState.STANDBY);
-      waitForHAState(toSvc, HAServiceState.ACTIVE);
-    } finally {
-      fromThr.zkfc.getElectorForTests().allowSessionReestablishmentForTests();
+      cluster.stop();
     }
   }
 
@@ -225,33 +258,32 @@
   @Test(timeout=15000)
   public void testDontFailoverToUnhealthyNode() throws Exception {
     try {
-      setupFCs();
+      cluster.start();
 
-      // Make svc2 unhealthy, and wait for its FC to notice the bad health.
-      svc2.isHealthy = false;
-      waitForHealthState(thr2.zkfc,
-          HealthMonitor.State.SERVICE_UNHEALTHY);
+      // Make svc1 unhealthy, and wait for its FC to notice the bad health.
+      cluster.setHealthy(1, false);
+      cluster.waitForHealthState(1, HealthMonitor.State.SERVICE_UNHEALTHY);
       
-      // Expire svc1
-      thr1.zkfc.getElectorForTests().preventSessionReestablishmentForTests();
+      // Expire svc0
+      cluster.getElector(0).preventSessionReestablishmentForTests();
       try {
-        expireActiveLockHolder(svc1);
+        cluster.expireActiveLockHolder(0);
 
-        LOG.info("Expired svc1's ZK session. Waiting a second to give svc2" +
+        LOG.info("Expired svc0's ZK session. Waiting a second to give svc1" +
             " a chance to take the lock, if it is ever going to.");
         Thread.sleep(1000);
         
         // Ensure that no one holds the lock.
-        waitForActiveLockHolder(null);
+        cluster.waitForActiveLockHolder(null);
         
       } finally {
-        LOG.info("Allowing svc1's elector to re-establish its connection");
-        thr1.zkfc.getElectorForTests().allowSessionReestablishmentForTests();
+        LOG.info("Allowing svc0's elector to re-establish its connection");
+        cluster.getElector(0).allowSessionReestablishmentForTests();
       }
-      // svc1 should get the lock again
-      waitForActiveLockHolder(svc1);
+      // svc0 should get the lock again
+      cluster.waitForActiveLockHolder(0);
     } finally {
-      stopFCs();
+      cluster.stop();
     }
   }
 
@@ -262,29 +294,38 @@
   @Test(timeout=15000)
   public void testBecomingActiveFails() throws Exception {
     try {
-      setupFCs();
+      cluster.start();
+      DummyHAService svc1 = cluster.getService(1);
       
-      LOG.info("Making svc2 fail to become active");
-      svc2.failToBecomeActive = true;
+      LOG.info("Making svc1 fail to become active");
+      cluster.setFailToBecomeActive(1, true);
       
-      LOG.info("Faking svc1 unhealthy, should NOT successfully " +
-          "failover to svc2");
-      svc1.isHealthy = false;
-      waitForHealthState(thr1.zkfc, State.SERVICE_UNHEALTHY);
-      waitForActiveLockHolder(null);
+      LOG.info("Faking svc0 unhealthy, should NOT successfully " +
+          "failover to svc1");
+      cluster.setHealthy(0, false);
+      cluster.waitForHealthState(0, State.SERVICE_UNHEALTHY);
+      cluster.waitForActiveLockHolder(null);
 
-      Mockito.verify(svc2.proxy).transitionToActive();
-
-      waitForHAState(svc1, HAServiceState.STANDBY);
-      waitForHAState(svc2, HAServiceState.STANDBY);
       
-      LOG.info("Faking svc1 healthy again, should go back to svc1");
-      svc1.isHealthy = true;
-      waitForHAState(svc1, HAServiceState.ACTIVE);
-      waitForHAState(svc2, HAServiceState.STANDBY);
-      waitForActiveLockHolder(svc1);
+      Mockito.verify(svc1.proxy, Mockito.timeout(2000).atLeastOnce())
+        .transitionToActive(Mockito.<StateChangeRequestInfo>any());
+
+      cluster.waitForHAState(0, HAServiceState.STANDBY);
+      cluster.waitForHAState(1, HAServiceState.STANDBY);
+      
+      LOG.info("Faking svc0 healthy again, should go back to svc0");
+      cluster.setHealthy(0, true);
+      cluster.waitForHAState(0, HAServiceState.ACTIVE);
+      cluster.waitForHAState(1, HAServiceState.STANDBY);
+      cluster.waitForActiveLockHolder(0);
+      
+      // Ensure that we can fail back to svc1 once it is able
+      // to become active (e.g. the admin has restarted it)
+      LOG.info("Allowing svc1 to become active, expiring svc0");
+      svc1.failToBecomeActive = false;
+      cluster.expireAndVerifyFailover(0, 1);
     } finally {
-      stopFCs();
+      cluster.stop();
     }
   }
   
@@ -296,27 +337,25 @@
   @Test(timeout=15000)
   public void testZooKeeperFailure() throws Exception {
     try {
-      setupFCs();
+      cluster.start();
 
       // Record initial ZK sessions
-      long session1 = thr1.zkfc.getElectorForTests().getZKSessionIdForTests();
-      long session2 = thr2.zkfc.getElectorForTests().getZKSessionIdForTests();
+      long session0 = cluster.getElector(0).getZKSessionIdForTests();
+      long session1 = cluster.getElector(1).getZKSessionIdForTests();
 
       LOG.info("====== Stopping ZK server");
       stopServer();
       waitForServerDown(hostPort, CONNECTION_TIMEOUT);
       
       LOG.info("====== Waiting for services to enter NEUTRAL mode");
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr1.zkfc.getElectorForTests(),
+      cluster.waitForElectorState(0,
           ActiveStandbyElector.State.NEUTRAL);
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr2.zkfc.getElectorForTests(),
+      cluster.waitForElectorState(1,
           ActiveStandbyElector.State.NEUTRAL);
 
       LOG.info("====== Checking that the services didn't change HA state");
-      assertEquals(HAServiceState.ACTIVE, svc1.state);
-      assertEquals(HAServiceState.STANDBY, svc2.state);
+      assertEquals(HAServiceState.ACTIVE, cluster.getService(0).state);
+      assertEquals(HAServiceState.STANDBY, cluster.getService(1).state);
       
       LOG.info("====== Restarting server");
       startServer();
@@ -324,134 +363,224 @@
 
       // Nodes should go back to their original states, since they re-obtain
       // the same sessions.
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr1.zkfc.getElectorForTests(),
-          ActiveStandbyElector.State.ACTIVE);
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr2.zkfc.getElectorForTests(),
-          ActiveStandbyElector.State.STANDBY);
+      cluster.waitForElectorState(0, ActiveStandbyElector.State.ACTIVE);
+      cluster.waitForElectorState(1, ActiveStandbyElector.State.STANDBY);
       // Check HA states didn't change.
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr1.zkfc.getElectorForTests(),
-          ActiveStandbyElector.State.ACTIVE);
-      ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
-          thr2.zkfc.getElectorForTests(),
-          ActiveStandbyElector.State.STANDBY);
+      cluster.waitForHAState(0, HAServiceState.ACTIVE);
+      cluster.waitForHAState(1, HAServiceState.STANDBY);
+
       // Check they re-used the same sessions and didn't spuriously reconnect
+      assertEquals(session0,
+          cluster.getElector(0).getZKSessionIdForTests());
       assertEquals(session1,
-          thr1.zkfc.getElectorForTests().getZKSessionIdForTests());
-      assertEquals(session2,
-          thr2.zkfc.getElectorForTests().getZKSessionIdForTests());
+          cluster.getElector(1).getZKSessionIdForTests());
     } finally {
-      stopFCs();
-    }
-  }
-
-  /**
-   * Expire the ZK session of the given service. This requires
-   * (and asserts) that the given service be the current active.
-   * @throws NoNodeException if no service holds the lock
-   */
-  private void expireActiveLockHolder(DummyHAService expectedActive)
-      throws NoNodeException {
-    ZooKeeperServer zks = getServer(serverFactory);
-    Stat stat = new Stat();
-    byte[] data = zks.getZKDatabase().getData(
-        ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT + "/" +
-        ActiveStandbyElector.LOCK_FILENAME, stat, null);
-    
-    assertArrayEquals(Ints.toByteArray(expectedActive.index), data);
-    long session = stat.getEphemeralOwner();
-    LOG.info("Expiring svc " + expectedActive + "'s zookeeper session " + session);
-    zks.closeSession(session);
-  }
-  
-  /**
-   * Wait for the given HA service to enter the given HA state.
-   */
-  private void waitForHAState(DummyHAService svc, HAServiceState state)
-      throws Exception {
-    while (svc.state != state) {
-      ctx.checkException();
-      Thread.sleep(50);
+      cluster.stop();
     }
   }
   
   /**
-   * Wait for the ZKFC to be notified of a change in health state.
+   * Test that the ZKFC can gracefully cede its active status.
    */
-  private void waitForHealthState(DummyZKFC zkfc, State state)
+  @Test(timeout=15000)
+  public void testCedeActive() throws Exception {
+    try {
+      cluster.start();
+      DummyZKFC zkfc = cluster.getZkfc(0);
+      // It should be in active to start.
+      assertEquals(ActiveStandbyElector.State.ACTIVE,
+          zkfc.getElectorForTests().getStateForTests());
+
+      // Ask it to cede active for 3 seconds. It should respond promptly
+      // (i.e. the RPC itself should not take 3 seconds!)
+      ZKFCProtocol proxy = zkfc.getLocalTarget().getZKFCProxy(conf, 5000);
+      long st = System.currentTimeMillis();
+      proxy.cedeActive(3000);
+      long et = System.currentTimeMillis();
+      assertTrue("RPC to cedeActive took " + (et - st) + " ms",
+          et - st < 1000);
+      
+      // Should be in "INIT" state since it's not in the election
+      // at this point.
+      assertEquals(ActiveStandbyElector.State.INIT,
+          zkfc.getElectorForTests().getStateForTests());
+
+      // After the prescribed 3 seconds, should go into STANDBY state,
+      // since the other node in the cluster would have taken ACTIVE.
+      cluster.waitForElectorState(0, ActiveStandbyElector.State.STANDBY);
+      long et2 = System.currentTimeMillis();
+      assertTrue("Should take ~3 seconds to rejoin. Only took " + (et2 - et) +
+          "ms before rejoining.",
+          et2 - et > 2800);      
+    } finally {
+      cluster.stop();
+    }
+  }
+  
+  @Test(timeout=15000)
+  public void testGracefulFailover() throws Exception {
+    try {
+      cluster.start();
+
+      cluster.waitForActiveLockHolder(0);
+      cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
+      cluster.waitForActiveLockHolder(1);
+      cluster.getService(0).getZKFCProxy(conf, 5000).gracefulFailover();
+      cluster.waitForActiveLockHolder(0);
+      
+      assertEquals(0, cluster.getService(0).fenceCount);
+      assertEquals(0, cluster.getService(1).fenceCount);
+    } finally {
+      cluster.stop();
+    }
+  }
+  
+  @Test(timeout=15000)
+  public void testGracefulFailoverToUnhealthy() throws Exception {
+    try {
+      cluster.start();
+
+      cluster.waitForActiveLockHolder(0);
+
+      // Mark it unhealthy, wait for it to exit election
+      cluster.setHealthy(1, false);
+      cluster.waitForElectorState(1, ActiveStandbyElector.State.INIT);
+      
+      // Ask for failover, it should fail, because it's unhealthy
+      try {
+        cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
+        fail("Did not fail to graceful failover to unhealthy service!");
+      } catch (ServiceFailedException sfe) {
+        GenericTestUtils.assertExceptionContains(
+            cluster.getService(1).toString() + 
+            " is not currently healthy.", sfe);
+      }
+    } finally {
+      cluster.stop();
+    }
+  }
+  
+  @Test(timeout=15000)
+  public void testGracefulFailoverFailBecomingActive() throws Exception {
+    try {
+      cluster.start();
+
+      cluster.waitForActiveLockHolder(0);
+      cluster.setFailToBecomeActive(1, true);
+      
+      // Ask for failover, it should fail and report back to user.
+      try {
+        cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
+        fail("Did not fail to graceful failover when target failed " +
+            "to become active!");
+      } catch (ServiceFailedException sfe) {
+        GenericTestUtils.assertExceptionContains(
+            "Couldn't make " + cluster.getService(1) + " active", sfe);
+        GenericTestUtils.assertExceptionContains(
+            "injected failure", sfe);
+      }
+      
+      // No fencing
+      assertEquals(0, cluster.getService(0).fenceCount);
+      assertEquals(0, cluster.getService(1).fenceCount);
+
+      // Service 0 should go back to being active after the failed failover
+      cluster.waitForActiveLockHolder(0);
+    } finally {
+      cluster.stop();
+    }
+  }
+
+  @Test(timeout=15000)
+  public void testGracefulFailoverFailBecomingStandby() throws Exception {
+    try {
+      cluster.start();
+
+      cluster.waitForActiveLockHolder(0);
+      
+      // Ask for failover when old node fails to transition to standby.
+      // This should trigger fencing, since the cedeActive() command
+      // still works, but leaves the breadcrumb in place.
+      cluster.setFailToBecomeStandby(0, true);
+      cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
+
+      // Check that the old node was fenced
+      assertEquals(1, cluster.getService(0).fenceCount);
+    } finally {
+      cluster.stop();
+    }
+  }
+  
+  @Test(timeout=15000)
+  public void testGracefulFailoverFailBecomingStandbyAndFailFence()
       throws Exception {
-    while (zkfc.getLastHealthState() != state) {
-      ctx.checkException();
-      Thread.sleep(50);
+    try {
+      cluster.start();
+
+      cluster.waitForActiveLockHolder(0);
+      
+      // Ask for failover when old node fails to transition to standby.
+      // This should trigger fencing, since the cedeActive() command
+      // still works, but leaves the breadcrumb in place.
+      cluster.setFailToBecomeStandby(0, true);
+      cluster.setFailToFence(0, true);
+
+      try {
+        cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
+        fail("Failover should have failed when old node wont fence");
+      } catch (ServiceFailedException sfe) {
+        GenericTestUtils.assertExceptionContains(
+            "Unable to fence " + cluster.getService(0), sfe);
+      }
+    } finally {
+      cluster.stop();
     }
   }
 
   /**
-   * Wait for the given HA service to become the active lock holder.
-   * If the passed svc is null, waits for there to be no active
-   * lock holder.
+   * Test which exercises all of the inputs into ZKFC. This is particularly
+   * useful for running under jcarder to check for lock order violations.
    */
-  private void waitForActiveLockHolder(DummyHAService svc)
-      throws Exception {
-    ZooKeeperServer zks = getServer(serverFactory);
-    ActiveStandbyElectorTestUtil.waitForActiveLockData(ctx, zks,
-        ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT,
-        (svc == null) ? null : Ints.toByteArray(svc.index));
-  }
+  @Test(timeout=30000)
+  public void testOneOfEverything() throws Exception {
+    try {
+      cluster.start();
+      
+      // Failover by session expiration
+      LOG.info("====== Failing over by session expiration");
+      cluster.expireAndVerifyFailover(0, 1);
+      cluster.expireAndVerifyFailover(1, 0);
+      
+      // Restart ZK
+      LOG.info("====== Restarting server");
+      stopServer();
+      waitForServerDown(hostPort, CONNECTION_TIMEOUT);
+      startServer();
+      waitForServerUp(hostPort, CONNECTION_TIMEOUT);
 
+      // Failover by bad health
+      cluster.setHealthy(0, false);
+      cluster.waitForHAState(0, HAServiceState.STANDBY);
+      cluster.waitForHAState(1, HAServiceState.ACTIVE);
+      cluster.setHealthy(1, true);
+      cluster.setHealthy(0, false);
+      cluster.waitForHAState(1, HAServiceState.ACTIVE);
+      cluster.waitForHAState(0, HAServiceState.STANDBY);
+      cluster.setHealthy(0, true);
+      
+      cluster.waitForHealthState(0, State.SERVICE_HEALTHY);
+      
+      // Graceful failovers
+      cluster.getZkfc(1).gracefulFailoverToYou();
+      cluster.getZkfc(0).gracefulFailoverToYou();
+    } finally {
+      cluster.stop();
+    }
+  }
 
   private int runFC(DummyHAService target, String ... args) throws Exception {
-    DummyZKFC zkfc = new DummyZKFC(target);
-    zkfc.setConf(conf);
+    DummyZKFC zkfc = new DummyZKFC(conf, target);
     return zkfc.run(args);
   }
 
-  /**
-   * Test-thread which runs a ZK Failover Controller corresponding
-   * to a given dummy service.
-   */
-  private class DummyZKFCThread extends TestingThread {
-    private final DummyZKFC zkfc;
-
-    public DummyZKFCThread(TestContext ctx, DummyHAService svc) {
-      super(ctx);
-      this.zkfc = new DummyZKFC(svc);
-      zkfc.setConf(conf);
-    }
-
-    @Override
-    public void doWork() throws Exception {
-      try {
-        assertEquals(0, zkfc.run(new String[0]));
-      } catch (InterruptedException ie) {
-        // Interrupted by main thread, that's OK.
-      }
-    }
-  }
-  
-  private static class DummyZKFC extends ZKFailoverController {
-    private final DummyHAService localTarget;
-    
-    public DummyZKFC(DummyHAService localTarget) {
-      this.localTarget = localTarget;
-    }
-
-    @Override
-    protected byte[] targetToData(HAServiceTarget target) {
-      return Ints.toByteArray(((DummyHAService)target).index);
-    }
-    
-    @Override
-    protected HAServiceTarget dataToTarget(byte[] data) {
-      int index = Ints.fromByteArray(data);
-      return DummyHAService.getInstance(index);
-    }
-
-    @Override
-    protected HAServiceTarget getLocalTarget() {
-      return localTarget;
-    }
-  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverControllerStress.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverControllerStress.java
new file mode 100644
index 0000000..c1c2726
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverControllerStress.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import java.util.Random;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+/**
+ * Stress test for ZKFailoverController.
+ * Starts multiple ZKFCs for dummy services, and then performs many automatic
+ * failovers. While doing so, ensures that a fake "shared resource"
+ * (simulating the shared edits dir) is only owned by one service at a time. 
+ */
+public class TestZKFailoverControllerStress extends ClientBaseWithFixes {
+  
+  private static final int STRESS_RUNTIME_SECS = 30;
+  private static final int EXTRA_TIMEOUT_SECS = 10;
+  
+  private Configuration conf;
+  private MiniZKFCCluster cluster;
+
+  @Before
+  public void setupConfAndServices() throws Exception {
+    conf = new Configuration();
+    conf.set(ZKFailoverController.ZK_QUORUM_KEY, hostPort);
+    this.cluster = new MiniZKFCCluster(conf, getServer(serverFactory));
+  }
+  
+  @After
+  public void stopCluster() throws Exception {
+    cluster.stop();
+  }
+
+  /**
+   * Simply fail back and forth between the two services for the
+   * configured amount of time by expiring their ZK sessions.
+   */
+  @Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
+  public void testExpireBackAndForth() throws Exception {
+    cluster.start();
+    long st = System.currentTimeMillis();
+    long runFor = STRESS_RUNTIME_SECS * 1000;
+
+    int i = 0;
+    while (System.currentTimeMillis() - st < runFor) {
+      // flip flop the services back and forth
+      int from = i % 2;
+      int to = (i + 1) % 2;
+
+      // Expire one service, it should fail over to the other
+      LOG.info("Failing over via expiration from " + from + " to " + to);
+      cluster.expireAndVerifyFailover(from, to);
+
+      i++;
+    }
+  }
+  
+  /**
+   * Randomly expire the ZK sessions of the two ZKFCs. This differs
+   * from the above test in that it is not a controlled failover:
+   * we just perform random expirations and expect neither ZKFC to
+   * ever generate fatal exceptions.
+   */
+  @Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
+  public void testRandomExpirations() throws Exception {
+    cluster.start();
+    long st = System.currentTimeMillis();
+    long runFor = STRESS_RUNTIME_SECS * 1000;
+
+    Random r = new Random();
+    while (System.currentTimeMillis() - st < runFor) {
+      cluster.getTestContext().checkException();
+      int targetIdx = r.nextInt(2);
+      ActiveStandbyElector target = cluster.getElector(targetIdx);
+      long sessId = target.getZKSessionIdForTests();
+      if (sessId != -1) {
+        LOG.info(String.format("Expiring session %x for svc %d",
+            sessId, targetIdx));
+        getServer(serverFactory).closeSession(sessId);
+      }
+      Thread.sleep(r.nextInt(300));
+    }
+  }
+  
+  /**
+   * Have the services fail their health checks half the time,
+   * causing the master role to bounce back and forth in the
+   * cluster. Meanwhile, ZK is made to disconnect clients every 50ms,
+   * exercising the retry code and the handling of failures to become active.
+   */
+  @Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
+  public void testRandomHealthAndDisconnects() throws Exception {
+    long runFor = STRESS_RUNTIME_SECS * 1000;
+    Mockito.doAnswer(new RandomlyThrow(0))
+        .when(cluster.getService(0).proxy).monitorHealth();
+    Mockito.doAnswer(new RandomlyThrow(1))
+        .when(cluster.getService(1).proxy).monitorHealth();
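+    // Give the electors a large retry budget: the loop below has the ZK
+    // server drop all client connections every 50ms, and the ZKFCs must
+    // ride out those disconnects without giving up.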
+    ActiveStandbyElector.NUM_RETRIES = 100;
+    
+    // Don't start until after the above mocking. Otherwise we can get
+    // Mockito errors if the HM calls the proxy in the middle of
+    // setting up the mock.
+    cluster.start();
+    
+    long st = System.currentTimeMillis();
+    while (System.currentTimeMillis() - st < runFor) {
+      cluster.getTestContext().checkException();
+      serverFactory.closeAll();
+      Thread.sleep(50);
+    }
+  }
+  
+  
+  /**
+   * Randomly throws a HealthCheckFailedException on roughly half of the calls.
+   */
+  @SuppressWarnings("rawtypes")
+  private static class RandomlyThrow implements Answer {
+    private Random r = new Random();
+    private final int svcIdx;
+    public RandomlyThrow(int svcIdx) {
+      this.svcIdx = svcIdx;
+    }
+    @Override
+    public Object answer(InvocationOnMock invocation) throws Throwable {
+      if (r.nextBoolean()) {
+        LOG.info("Throwing an exception for svc " + svcIdx);
+        throw new HealthCheckFailedException("random failure");
+      }
+      return invocation.callRealMethod();
+    }
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ZKFCTestUtil.java
similarity index 65%
copy from hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java
copy to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ZKFCTestUtil.java
index 6177c79..4a5eacd 100644
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ZKFCTestUtil.java
@@ -15,13 +15,20 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.hadoop.ha;
 
-package org.apache.hadoop.util;
+import org.apache.hadoop.test.MultithreadedTestUtil;
 
-public interface RemoteExecution {
-  public void executeCommand (String remoteHostName, String user,
-          String  command) throws Exception;
-  public int getExitCode();
-  public String getOutput();
-  public String getCommandString();
+public class ZKFCTestUtil {
+  
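+  /**
+   * Poll (every 50ms) until the given ZKFC reports the expected health
+   * state, surfacing any exception recorded in the optional test context
+   * while waiting.
+   */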
+  public static void waitForHealthState(ZKFailoverController zkfc,
+      HealthMonitor.State state,
+      MultithreadedTestUtil.TestContext ctx) throws Exception {
+    while (zkfc.getLastHealthState() != state) {
+      if (ctx != null) {
+        ctx.checkException();
+      }
+      Thread.sleep(50);
+    }
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/lib/TestStaticUserWebFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/lib/TestStaticUserWebFilter.java
index b7bf98c..9b161df 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/lib/TestStaticUserWebFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/lib/TestStaticUserWebFilter.java
@@ -27,6 +27,7 @@
 import javax.servlet.http.HttpServletRequestWrapper;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.http.lib.StaticUserWebFilter.StaticUserFilter;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
@@ -36,7 +37,7 @@
   private FilterConfig mockConfig(String username) {
     FilterConfig mock = Mockito.mock(FilterConfig.class);
     Mockito.doReturn(username).when(mock).getInitParameter(
-        StaticUserWebFilter.USERNAME_KEY);
+        CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER);
     return mock;
   }
   
@@ -73,7 +74,7 @@
   @Test
   public void testConfiguration() {
     Configuration conf = new Configuration();
-    conf.set(StaticUserWebFilter.USERNAME_KEY, "joe");
+    conf.set(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER, "joe");
     assertEquals("joe", StaticUserWebFilter.getUsernameFromConf(conf));
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
index a86c532..9bf83b9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
@@ -241,6 +241,30 @@
     Text.validateUTF8(utf8, 0, length);
   }
 
+  public void testClear() throws Exception {
+    // Test lengths on an empty text object
+    Text text = new Text();
+    assertEquals(
+        "Actual string on an empty text object must be an empty string",
+        "", text.toString());
+    assertEquals("Underlying byte array length must be zero",
+        0, text.getBytes().length);
+    assertEquals("String's length must be zero",
+        0, text.getLength());
+
+    // Test if clear works as intended
+    text = new Text("abcd\u20acbdcd\u20ac");
+    int len = text.getLength();
+    text.clear();
+    assertEquals("String must be empty after clear()",
+        "", text.toString());
+    assertTrue(
+        "Length of the byte array must not decrease after clear()",
+        text.getBytes().length >= len);
+    assertEquals("Length of the string must be reset to 0 after clear()",
+        0, text.getLength());
+  }
+
   public void testTextText() throws CharacterCodingException {
     Text a=new Text("abc");
     Text b=new Text("a");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
index 54b75da..6d7d695 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
@@ -18,11 +18,15 @@
 
 package org.apache.hadoop.security.token;
 
+import static junit.framework.Assert.assertEquals;
+
 import java.io.*;
 import java.util.Arrays;
 
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
 
 import junit.framework.TestCase;
 
@@ -94,5 +98,20 @@
       checkUrlSafe(encode);
     }
   }
+  
+  public void testDecodeIdentifier() throws IOException {
+    TestDelegationTokenSecretManager secretManager =
+      new TestDelegationTokenSecretManager(0, 0, 0, 0);
+    secretManager.startThreads();
+    TestDelegationTokenIdentifier id = new TestDelegationTokenIdentifier(
+        new Text("owner"), new Text("renewer"), new Text("realUser"));
+    
+    Token<TestDelegationTokenIdentifier> token =
+      new Token<TestDelegationTokenIdentifier>(id, secretManager);
+    TokenIdentifier idCopy = token.decodeIdentifier();
+    
+    assertNotSame(id, idCopy);
+    assertEquals(id, idCopy);
+  }
 
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 0000000..891a67b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1,2 @@
+org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier
+org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier
diff --git a/hadoop-common-project/hadoop-common/src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj b/hadoop-common-project/hadoop-common/src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj
deleted file mode 100644
index 76f5129..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj
+++ /dev/null
@@ -1,400 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.File;
-import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
-import java.util.HashMap;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Default DaemonProtocolAspect which is used to provide default implementation
- * for all the common daemon methods. If a daemon requires more specialized
- * version of method, it is responsibility of the DaemonClient to introduce the
- * same in woven classes.
- * 
- */
-public aspect DaemonProtocolAspect {
-
-  private boolean DaemonProtocol.ready;
-  
-  @SuppressWarnings("unchecked")
-  private HashMap<Object, List<ControlAction>> DaemonProtocol.actions = 
-    new HashMap<Object, List<ControlAction>>();
-  private static final Log LOG = LogFactory.getLog(
-      DaemonProtocolAspect.class.getName());
-
-  private static FsPermission defaultPermission = new FsPermission(
-     FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE);
-
-  /**
-   * Set if the daemon process is ready or not, concrete daemon protocol should
-   * implement pointcuts to determine when the daemon is ready and use the
-   * setter to set the ready state.
-   * 
-   * @param ready
-   *          true if the Daemon is ready.
-   */
-  public void DaemonProtocol.setReady(boolean ready) {
-    this.ready = ready;
-  }
-
-  /**
-   * Checks if the daemon process is alive or not.
-   * 
-   * @throws IOException
-   *           if daemon is not alive.
-   */
-  public void DaemonProtocol.ping() throws IOException {
-  }
-
-  /**
-   * Checks if the daemon process is ready to accepting RPC connections after it
-   * finishes initialization. <br/>
-   * 
-   * @return true if ready to accept connection.
-   * 
-   * @throws IOException
-   */
-  public boolean DaemonProtocol.isReady() throws IOException {
-    return ready;
-  }
-
-  /**
-   * Returns the process related information regarding the daemon process. <br/>
-   * 
-   * @return process information.
-   * @throws IOException
-   */
-  public ProcessInfo DaemonProtocol.getProcessInfo() throws IOException {
-    int activeThreadCount = Thread.activeCount();
-    long currentTime = System.currentTimeMillis();
-    long maxmem = Runtime.getRuntime().maxMemory();
-    long freemem = Runtime.getRuntime().freeMemory();
-    long totalmem = Runtime.getRuntime().totalMemory();
-    Map<String, String> envMap = System.getenv();
-    Properties sysProps = System.getProperties();
-    Map<String, String> props = new HashMap<String, String>();
-    for (Map.Entry entry : sysProps.entrySet()) {
-      props.put((String) entry.getKey(), (String) entry.getValue());
-    }
-    ProcessInfo info = new ProcessInfoImpl(activeThreadCount, currentTime,
-        freemem, maxmem, totalmem, envMap, props);
-    return info;
-  }
-
-  public void DaemonProtocol.enable(List<Enum<?>> faults) throws IOException {
-  }
-
-  public void DaemonProtocol.disableAll() throws IOException {
-  }
-
-  public abstract Configuration DaemonProtocol.getDaemonConf()
-    throws IOException;
-
-  public FileStatus DaemonProtocol.getFileStatus(String path, boolean local) 
-    throws IOException {
-    Path p = new Path(path);
-    FileSystem fs = getFS(p, local);
-    p.makeQualified(fs);
-    FileStatus fileStatus = fs.getFileStatus(p);
-    return cloneFileStatus(fileStatus);
-  }
-  
-  /**
-   * Create a file with given permissions in a file system.
-   * @param path - source path where the file has to create.
-   * @param fileName - file name.
-   * @param permission - file permissions.
-   * @param local - identifying the path whether its local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void DaemonProtocol.createFile(String path, String fileName, 
-     FsPermission permission, boolean local) throws IOException {
-    Path p = new Path(path); 
-    FileSystem fs = getFS(p, local);
-    Path filePath = new Path(path, fileName);
-    fs.create(filePath);
-    if (permission == null) {
-      fs.setPermission(filePath, defaultPermission);
-    } else {
-      fs.setPermission(filePath, permission);
-    }
-    fs.close();
-  }
-
-  /**
-   * Create a folder with given permissions in a file system.
-   * @param path - source path where the file has to be creating.
-   * @param folderName - folder name.
-   * @param permission - folder permissions.
-   * @param local - identifying the path whether its local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void DaemonProtocol.createFolder(String path, String folderName, 
-     FsPermission permission, boolean local) throws IOException {
-    Path p = new Path(path);
-    FileSystem fs = getFS(p, local);
-    Path folderPath = new Path(path, folderName);
-    fs.mkdirs(folderPath);
-    if (permission == null) {
-      fs.setPermission(folderPath, defaultPermission);
-    } else {
-      fs.setPermission(folderPath, permission);
-    }
-    fs.close();
-  }
-
-  public FileStatus[] DaemonProtocol.listStatus(String path, boolean local) 
-    throws IOException {
-    Path p = new Path(path);
-    FileSystem fs = getFS(p, local);
-    FileStatus[] status = fs.listStatus(p);
-    if (status != null) {
-      FileStatus[] result = new FileStatus[status.length];
-      int i = 0;
-      for (FileStatus fileStatus : status) {
-        result[i++] = cloneFileStatus(fileStatus);
-      }
-      return result;
-    }
-    return status;
-  }
-
-  /**
-   * FileStatus object may not be serializable. Clone it into raw FileStatus 
-   * object.
-   */
-  private FileStatus DaemonProtocol.cloneFileStatus(FileStatus fileStatus) {
-    return new FileStatus(fileStatus.getLen(),
-        fileStatus.isDir(),
-        fileStatus.getReplication(),
-        fileStatus.getBlockSize(),
-        fileStatus.getModificationTime(),
-        fileStatus.getAccessTime(),
-        fileStatus.getPermission(),
-        fileStatus.getOwner(),
-        fileStatus.getGroup(),
-        fileStatus.getPath());
-  }
-
-  private FileSystem DaemonProtocol.getFS(final Path path, final boolean local)
-      throws IOException {
-    FileSystem ret = null;
-    try {
-      ret = UserGroupInformation.getLoginUser().doAs (
-          new PrivilegedExceptionAction<FileSystem>() {
-            public FileSystem run() throws IOException {
-              FileSystem fs = null;
-              if (local) {
-                fs = FileSystem.getLocal(getDaemonConf());
-              } else {
-                fs = path.getFileSystem(getDaemonConf());
-              }
-              return fs;
-            }
-          });
-    } catch (InterruptedException ie) {
-    }
-    return ret;
-  }
-  
-  @SuppressWarnings("unchecked")
-  public ControlAction[] DaemonProtocol.getActions(Writable key) 
-    throws IOException {
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(key);
-      if(actionList == null) {
-        return new ControlAction[0];
-      } else {
-        return (ControlAction[]) actionList.toArray(new ControlAction[actionList
-                                                                      .size()]);
-      }
-    }
-  }
-
-
-  @SuppressWarnings("unchecked")
-  public void DaemonProtocol.sendAction(ControlAction action) 
-      throws IOException {
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(action.getTarget());
-      if(actionList == null) {
-        actionList = new ArrayList<ControlAction>();
-        actions.put(action.getTarget(), actionList);
-      }
-      actionList.add(action);
-    } 
-  }
- 
-  @SuppressWarnings("unchecked")
-  public boolean DaemonProtocol.isActionPending(ControlAction action) 
-    throws IOException{
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(action.getTarget());
-      if(actionList == null) {
-        return false;
-      } else {
-        return actionList.contains(action);
-      }
-    }
-  }
-  
-  
-  @SuppressWarnings("unchecked")
-  public void DaemonProtocol.removeAction(ControlAction action) 
-    throws IOException {
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(action.getTarget());
-      if(actionList == null) {
-        return;
-      } else {
-        actionList.remove(action);
-      }
-    }
-  }
-  
-  public void DaemonProtocol.clearActions() throws IOException {
-    synchronized (actions) {
-      actions.clear();
-    }
-  }
-
-  public String DaemonProtocol.getFilePattern() {
-    //We use the environment variable HADOOP_LOGFILE to get the
-    //pattern to use in the search.
-    String logDir = System.getProperty("hadoop.log.dir");
-    String daemonLogPattern = System.getProperty("hadoop.log.file");
-    if(daemonLogPattern == null && daemonLogPattern.isEmpty()) {
-      return "*";
-    }
-    return  logDir+File.separator+daemonLogPattern+"*";
-  }
-
-  public int DaemonProtocol.getNumberOfMatchesInLogFile(String pattern,
-      String[] list) throws IOException {
-    StringBuffer filePattern = new StringBuffer(getFilePattern());    
-    String[] cmd = null;
-    if (list != null) {
-      StringBuffer filterExpPattern = new StringBuffer();
-      int index=0;
-      for (String excludeExp : list) {
-        if (index++ < list.length -1) {
-           filterExpPattern.append("grep -v " + excludeExp + " | ");
-        } else {
-           filterExpPattern.append("grep -v " + excludeExp + " | wc -l");
-        }
-      }
-      cmd = new String[] {
-                "bash",
-                "-c",
-                "grep "
-                + pattern + " " + filePattern + " | "
-                + filterExpPattern};
-    } else {
-      cmd = new String[] {
-                "bash",
-                "-c",
-                "grep -c "
-                + pattern + " " + filePattern
-                + " | awk -F: '{s+=$2} END {print s}'" };    
-    }
-    ShellCommandExecutor shexec = new ShellCommandExecutor(cmd);
-    shexec.execute();
-    String output = shexec.getOutput();
-    return Integer.parseInt(output.replaceAll("\n", "").trim());
-  }
-
-  /**
-   * This method is used for suspending the process.
-   * @param pid process id
-   * @throws IOException if an I/O error occurs.
-   * @return true if process is suspended otherwise false.
-   */
-  public boolean DaemonProtocol.suspendProcess(String pid) throws IOException {
-    String suspendCmd = getDaemonConf().get("test.system.hdrc.suspend.cmd",
-        "kill -SIGSTOP");
-    String [] command = {"bash", "-c", suspendCmd + " " + pid};
-    ShellCommandExecutor shexec = new ShellCommandExecutor(command);
-    try {
-      shexec.execute();
-    } catch (Shell.ExitCodeException e) {
-      LOG.warn("suspended process throws an exitcode "
-          + "exception for not being suspended the given process id.");
-      return false;
-    }
-    LOG.info("The suspend process command is :"
-        + shexec.toString()
-        + " and the output for the command is "
-        + shexec.getOutput());
-    return true;
-  }
-
-  /**
-   * This method is used for resuming the process
-   * @param pid process id of suspended process.
-   * @throws IOException if an I/O error occurs.
-   * @return true if suspeneded process is resumed otherwise false.
-   */
-  public boolean DaemonProtocol.resumeProcess(String pid) throws IOException {
-    String resumeCmd = getDaemonConf().get("test.system.hdrc.resume.cmd",
-        "kill -SIGCONT");
-    String [] command = {"bash", "-c", resumeCmd + " " + pid};
-    ShellCommandExecutor shexec = new ShellCommandExecutor(command);
-    try {
-      shexec.execute();
-    } catch(Shell.ExitCodeException e) {
-        LOG.warn("Resume process throws an exitcode "
-          + "exception for not being resumed the given process id.");
-      return false;
-    }
-    LOG.info("The resume process command is :"
-        + shexec.toString()
-        + " and the output for the command is "
-        + shexec.getOutput());
-    return true;
-  }
-
-  private String DaemonProtocol.user = null;
-  
-  public String DaemonProtocol.getDaemonUser() {
-    return user;
-  }
-  
-  public void DaemonProtocol.setUser(String user) {
-    this.user = user;
-  }
-}
-
diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/Makefile.in b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/Makefile.in
deleted file mode 100644
index b64b820..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/Makefile.in
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-OBJS=main.o runAs.o
-CC=@CC@
-CFLAGS = @CFLAGS@
-BINARY=runAs
-installdir = @prefix@
-
-all: $(OBJS)
-	$(CC) $(CFLAG) -o $(BINARY) $(OBJS)
-
-main.o: runAs.o main.c
-	$(CC) $(CFLAG) -o main.o -c main.c
-
-runAs.o: runAs.h runAs.c
-	$(CC) $(CFLAG) -o runAs.o -c runAs.c
-
-clean:
-	rm -rf $(BINARY) $(OBJS) $(TESTOBJS)
-
-install: all
-	cp $(BINARY) $(installdir)
-
-uninstall:
-	rm -rf $(installdir)/$(BINARY)
-	rm -rf $(BINARY)
diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure
deleted file mode 100644
index acd5bfa..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure
+++ /dev/null
@@ -1,5117 +0,0 @@
-#! /bin/sh
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.65 for runAs 0.1.
-#
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
-# Inc.
-#
-# This configure script is free software; the Free Software Foundation
-# gives unlimited permission to copy, distribute and modify it.
-## -------------------- ##
-## M4sh Initialization. ##
-## -------------------- ##
-
-# Be more Bourne compatible
-DUALCASE=1; export DUALCASE # for MKS sh
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
-  emulate sh
-  NULLCMD=:
-  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '${1+"$@"}'='"$@"'
-  setopt NO_GLOB_SUBST
-else
-  case `(set -o) 2>/dev/null` in #(
-  *posix*) :
-    set -o posix ;; #(
-  *) :
-     ;;
-esac
-fi
-
-
-as_nl='
-'
-export as_nl
-# Printing a long string crashes Solaris 7 /usr/bin/printf.
-as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
-# Prefer a ksh shell builtin over an external printf program on Solaris,
-# but without wasting forks for bash or zsh.
-if test -z "$BASH_VERSION$ZSH_VERSION" \
-    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='print -r --'
-  as_echo_n='print -rn --'
-elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='printf %s\n'
-  as_echo_n='printf %s'
-else
-  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
-    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
-    as_echo_n='/usr/ucb/echo -n'
-  else
-    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
-    as_echo_n_body='eval
-      arg=$1;
-      case $arg in #(
-      *"$as_nl"*)
-	expr "X$arg" : "X\\(.*\\)$as_nl";
-	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
-      esac;
-      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
-    '
-    export as_echo_n_body
-    as_echo_n='sh -c $as_echo_n_body as_echo'
-  fi
-  export as_echo_body
-  as_echo='sh -c $as_echo_body as_echo'
-fi
-
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
-  PATH_SEPARATOR=:
-  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
-    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
-      PATH_SEPARATOR=';'
-  }
-fi
-
-
-# IFS
-# We need space, tab and new line, in precisely that order.  Quoting is
-# there to prevent editors from complaining about space-tab.
-# (If _AS_PATH_WALK were called with IFS unset, it would disable word
-# splitting by setting IFS to empty value.)
-IFS=" ""	$as_nl"
-
-# Find who we are.  Look in the path if we contain no directory separator.
-case $0 in #((
-  *[\\/]* ) as_myself=$0 ;;
-  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-  done
-IFS=$as_save_IFS
-
-     ;;
-esac
-# We did not find ourselves, most probably we were run as `sh COMMAND'
-# in which case we are not to be found in the path.
-if test "x$as_myself" = x; then
-  as_myself=$0
-fi
-if test ! -f "$as_myself"; then
-  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
-  exit 1
-fi
-
-# Unset variables that we do not need and which cause bugs (e.g. in
-# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
-# suppresses any "Segmentation fault" message there.  '((' could
-# trigger a bug in pdksh 5.2.14.
-for as_var in BASH_ENV ENV MAIL MAILPATH
-do eval test x\${$as_var+set} = xset \
-  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
-done
-PS1='$ '
-PS2='> '
-PS4='+ '
-
-# NLS nuisances.
-LC_ALL=C
-export LC_ALL
-LANGUAGE=C
-export LANGUAGE
-
-# CDPATH.
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
-
-if test "x$CONFIG_SHELL" = x; then
-  as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
-  emulate sh
-  NULLCMD=:
-  # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '\${1+\"\$@\"}'='\"\$@\"'
-  setopt NO_GLOB_SUBST
-else
-  case \`(set -o) 2>/dev/null\` in #(
-  *posix*) :
-    set -o posix ;; #(
-  *) :
-     ;;
-esac
-fi
-"
-  as_required="as_fn_return () { (exit \$1); }
-as_fn_success () { as_fn_return 0; }
-as_fn_failure () { as_fn_return 1; }
-as_fn_ret_success () { return 0; }
-as_fn_ret_failure () { return 1; }
-
-exitcode=0
-as_fn_success || { exitcode=1; echo as_fn_success failed.; }
-as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
-as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
-as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
-if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
-
-else
-  exitcode=1; echo positional parameters were not saved.
-fi
-test x\$exitcode = x0 || exit 1"
-  as_suggested="  as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
-  as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
-  eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
-  test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
-test \$(( 1 + 1 )) = 2 || exit 1"
-  if (eval "$as_required") 2>/dev/null; then :
-  as_have_required=yes
-else
-  as_have_required=no
-fi
-  if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
-
-else
-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-as_found=false
-for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  as_found=:
-  case $as_dir in #(
-	 /*)
-	   for as_base in sh bash ksh sh5; do
-	     # Try only shells that exist, to save several forks.
-	     as_shell=$as_dir/$as_base
-	     if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
-		    { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
-  CONFIG_SHELL=$as_shell as_have_required=yes
-		   if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
-  break 2
-fi
-fi
-	   done;;
-       esac
-  as_found=false
-done
-$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
-	      { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
-  CONFIG_SHELL=$SHELL as_have_required=yes
-fi; }
-IFS=$as_save_IFS
-
-
-      if test "x$CONFIG_SHELL" != x; then :
-  # We cannot yet assume a decent shell, so we have to provide a
-	# neutralization value for shells without unset; and this also
-	# works around shells that cannot unset nonexistent variables.
-	BASH_ENV=/dev/null
-	ENV=/dev/null
-	(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
-	export CONFIG_SHELL
-	exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
-fi
-
-    if test x$as_have_required = xno; then :
-  $as_echo "$0: This script requires a shell more modern than all"
-  $as_echo "$0: the shells that I found on your system."
-  if test x${ZSH_VERSION+set} = xset ; then
-    $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
-    $as_echo "$0: be upgraded to zsh 4.3.4 or later."
-  else
-    $as_echo "$0: Please tell bug-autoconf@gnu.org about your system,
-$0: including any error possibly output before this
-$0: message. Then install a modern shell, or manually run
-$0: the script under such a shell if you do have one."
-  fi
-  exit 1
-fi
-fi
-fi
-SHELL=${CONFIG_SHELL-/bin/sh}
-export SHELL
-# Unset more variables known to interfere with behavior of common tools.
-CLICOLOR_FORCE= GREP_OPTIONS=
-unset CLICOLOR_FORCE GREP_OPTIONS
-
-## --------------------- ##
-## M4sh Shell Functions. ##
-## --------------------- ##
-# as_fn_unset VAR
-# ---------------
-# Portably unset VAR.
-as_fn_unset ()
-{
-  { eval $1=; unset $1;}
-}
-as_unset=as_fn_unset
-
-# as_fn_set_status STATUS
-# -----------------------
-# Set $? to STATUS, without forking.
-as_fn_set_status ()
-{
-  return $1
-} # as_fn_set_status
-
-# as_fn_exit STATUS
-# -----------------
-# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
-as_fn_exit ()
-{
-  set +e
-  as_fn_set_status $1
-  exit $1
-} # as_fn_exit
-
-# as_fn_mkdir_p
-# -------------
-# Create "$as_dir" as a directory, including parents if necessary.
-as_fn_mkdir_p ()
-{
-
-  case $as_dir in #(
-  -*) as_dir=./$as_dir;;
-  esac
-  test -d "$as_dir" || eval $as_mkdir_p || {
-    as_dirs=
-    while :; do
-      case $as_dir in #(
-      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
-      *) as_qdir=$as_dir;;
-      esac
-      as_dirs="'$as_qdir' $as_dirs"
-      as_dir=`$as_dirname -- "$as_dir" ||
-$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$as_dir" : 'X\(//\)[^/]' \| \
-	 X"$as_dir" : 'X\(//\)$' \| \
-	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$as_dir" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-      test -d "$as_dir" && break
-    done
-    test -z "$as_dirs" || eval "mkdir $as_dirs"
-  } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
-
-
-} # as_fn_mkdir_p
-# as_fn_append VAR VALUE
-# ----------------------
-# Append the text in VALUE to the end of the definition contained in VAR. Take
-# advantage of any shell optimizations that allow amortized linear growth over
-# repeated appends, instead of the typical quadratic growth present in naive
-# implementations.
-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
-  eval 'as_fn_append ()
-  {
-    eval $1+=\$2
-  }'
-else
-  as_fn_append ()
-  {
-    eval $1=\$$1\$2
-  }
-fi # as_fn_append
-
-# as_fn_arith ARG...
-# ------------------
-# Perform arithmetic evaluation on the ARGs, and store the result in the
-# global $as_val. Take advantage of shells that can avoid forks. The arguments
-# must be portable across $(()) and expr.
-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
-  eval 'as_fn_arith ()
-  {
-    as_val=$(( $* ))
-  }'
-else
-  as_fn_arith ()
-  {
-    as_val=`expr "$@" || test $? -eq 1`
-  }
-fi # as_fn_arith
-
-
-# as_fn_error ERROR [LINENO LOG_FD]
-# ---------------------------------
-# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
-# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
-# script with status $?, using 1 if that was 0.
-as_fn_error ()
-{
-  as_status=$?; test $as_status -eq 0 && as_status=1
-  if test "$3"; then
-    as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-    $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
-  fi
-  $as_echo "$as_me: error: $1" >&2
-  as_fn_exit $as_status
-} # as_fn_error
-
-if expr a : '\(a\)' >/dev/null 2>&1 &&
-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
-  as_expr=expr
-else
-  as_expr=false
-fi
-
-if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
-  as_basename=basename
-else
-  as_basename=false
-fi
-
-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
-  as_dirname=dirname
-else
-  as_dirname=false
-fi
-
-as_me=`$as_basename -- "$0" ||
-$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
-	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X/"$0" |
-    sed '/^.*\/\([^/][^/]*\)\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-
-# Avoid depending upon Character Ranges.
-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
-as_cr_digits='0123456789'
-as_cr_alnum=$as_cr_Letters$as_cr_digits
-
-
-  as_lineno_1=$LINENO as_lineno_1a=$LINENO
-  as_lineno_2=$LINENO as_lineno_2a=$LINENO
-  eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
-  test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
-  # Blame Lee E. McMahon (1931-1989) for sed's syntax.  :-)
-  sed -n '
-    p
-    /[$]LINENO/=
-  ' <$as_myself |
-    sed '
-      s/[$]LINENO.*/&-/
-      t lineno
-      b
-      :lineno
-      N
-      :loop
-      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
-      t loop
-      s/-\n.*//
-    ' >$as_me.lineno &&
-  chmod +x "$as_me.lineno" ||
-    { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
-
-  # Don't try to exec as it changes $[0], causing all sort of problems
-  # (the dirname of $[0] is not the place where we might find the
-  # original and so on.  Autoconf is especially sensitive to this).
-  . "./$as_me.lineno"
-  # Exit status is that of the last command.
-  exit
-}
-
-ECHO_C= ECHO_N= ECHO_T=
-case `echo -n x` in #(((((
--n*)
-  case `echo 'xy\c'` in
-  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
-  xy)  ECHO_C='\c';;
-  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
-       ECHO_T='	';;
-  esac;;
-*)
-  ECHO_N='-n';;
-esac
-
-rm -f conf$$ conf$$.exe conf$$.file
-if test -d conf$$.dir; then
-  rm -f conf$$.dir/conf$$.file
-else
-  rm -f conf$$.dir
-  mkdir conf$$.dir 2>/dev/null
-fi
-if (echo >conf$$.file) 2>/dev/null; then
-  if ln -s conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s='ln -s'
-    # ... but there are two gotchas:
-    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
-    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
-    # In both cases, we have to default to `cp -p'.
-    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
-      as_ln_s='cp -p'
-  elif ln conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s=ln
-  else
-    as_ln_s='cp -p'
-  fi
-else
-  as_ln_s='cp -p'
-fi
-rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
-rmdir conf$$.dir 2>/dev/null
-
-if mkdir -p . 2>/dev/null; then
-  as_mkdir_p='mkdir -p "$as_dir"'
-else
-  test -d ./-p && rmdir ./-p
-  as_mkdir_p=false
-fi
-
-if test -x / >/dev/null 2>&1; then
-  as_test_x='test -x'
-else
-  if ls -dL / >/dev/null 2>&1; then
-    as_ls_L_option=L
-  else
-    as_ls_L_option=
-  fi
-  as_test_x='
-    eval sh -c '\''
-      if test -d "$1"; then
-	test -d "$1/.";
-      else
-	case $1 in #(
-	-*)set "./$1";;
-	esac;
-	case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
-	???[sx]*):;;*)false;;esac;fi
-    '\'' sh
-  '
-fi
-as_executable_p=$as_test_x
-
-# Sed expression to map a string onto a valid CPP name.
-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
-
-# Sed expression to map a string onto a valid variable name.
-as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
-
-
-test -n "$DJDIR" || exec 7<&0 </dev/null
-exec 6>&1
-
-# Name of the host.
-# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
-# so uname gets run too.
-ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
-
-#
-# Initializations.
-#
-ac_default_prefix=/usr/local
-ac_clean_files=
-ac_config_libobj_dir=.
-LIBOBJS=
-cross_compiling=no
-subdirs=
-MFLAGS=
-MAKEFLAGS=
-
-# Identity of this package.
-PACKAGE_NAME='runAs'
-PACKAGE_TARNAME='runas'
-PACKAGE_VERSION='0.1'
-PACKAGE_STRING='runAs 0.1'
-PACKAGE_BUGREPORT=''
-PACKAGE_URL=''
-
-ac_default_prefix=.
-ac_unique_file="main.c"
-# Factoring default headers for most tests.
-ac_includes_default="\
-#include <stdio.h>
-#ifdef HAVE_SYS_TYPES_H
-# include <sys/types.h>
-#endif
-#ifdef HAVE_SYS_STAT_H
-# include <sys/stat.h>
-#endif
-#ifdef STDC_HEADERS
-# include <stdlib.h>
-# include <stddef.h>
-#else
-# ifdef HAVE_STDLIB_H
-#  include <stdlib.h>
-# endif
-#endif
-#ifdef HAVE_STRING_H
-# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
-#  include <memory.h>
-# endif
-# include <string.h>
-#endif
-#ifdef HAVE_STRINGS_H
-# include <strings.h>
-#endif
-#ifdef HAVE_INTTYPES_H
-# include <inttypes.h>
-#endif
-#ifdef HAVE_STDINT_H
-# include <stdint.h>
-#endif
-#ifdef HAVE_UNISTD_H
-# include <unistd.h>
-#endif"
-
-ac_subst_vars='SET_MAKE
-LTLIBOBJS
-LIBOBJS
-EGREP
-GREP
-CPP
-OBJEXT
-EXEEXT
-ac_ct_CC
-CPPFLAGS
-LDFLAGS
-CFLAGS
-CC
-target_alias
-host_alias
-build_alias
-LIBS
-ECHO_T
-ECHO_N
-ECHO_C
-DEFS
-mandir
-localedir
-libdir
-psdir
-pdfdir
-dvidir
-htmldir
-infodir
-docdir
-oldincludedir
-includedir
-localstatedir
-sharedstatedir
-sysconfdir
-datadir
-datarootdir
-libexecdir
-sbindir
-bindir
-program_transform_name
-prefix
-exec_prefix
-PACKAGE_URL
-PACKAGE_BUGREPORT
-PACKAGE_STRING
-PACKAGE_VERSION
-PACKAGE_TARNAME
-PACKAGE_NAME
-PATH_SEPARATOR
-SHELL'
-ac_subst_files=''
-ac_user_opts='
-enable_option_checking
-with_home
-'
-      ac_precious_vars='build_alias
-host_alias
-target_alias
-CC
-CFLAGS
-LDFLAGS
-LIBS
-CPPFLAGS
-CPP'
-
-
-# Initialize some variables set by options.
-ac_init_help=
-ac_init_version=false
-ac_unrecognized_opts=
-ac_unrecognized_sep=
-# The variables have the same names as the options, with
-# dashes changed to underlines.
-cache_file=/dev/null
-exec_prefix=NONE
-no_create=
-no_recursion=
-prefix=NONE
-program_prefix=NONE
-program_suffix=NONE
-program_transform_name=s,x,x,
-silent=
-site=
-srcdir=
-verbose=
-x_includes=NONE
-x_libraries=NONE
-
-# Installation directory options.
-# These are left unexpanded so users can "make install exec_prefix=/foo"
-# and all the variables that are supposed to be based on exec_prefix
-# by default will actually change.
-# Use braces instead of parens because sh, perl, etc. also accept them.
-# (The list follows the same order as the GNU Coding Standards.)
-bindir='${exec_prefix}/bin'
-sbindir='${exec_prefix}/sbin'
-libexecdir='${exec_prefix}/libexec'
-datarootdir='${prefix}/share'
-datadir='${datarootdir}'
-sysconfdir='${prefix}/etc'
-sharedstatedir='${prefix}/com'
-localstatedir='${prefix}/var'
-includedir='${prefix}/include'
-oldincludedir='/usr/include'
-docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
-infodir='${datarootdir}/info'
-htmldir='${docdir}'
-dvidir='${docdir}'
-pdfdir='${docdir}'
-psdir='${docdir}'
-libdir='${exec_prefix}/lib'
-localedir='${datarootdir}/locale'
-mandir='${datarootdir}/man'
-
-ac_prev=
-ac_dashdash=
-for ac_option
-do
-  # If the previous option needs an argument, assign it.
-  if test -n "$ac_prev"; then
-    eval $ac_prev=\$ac_option
-    ac_prev=
-    continue
-  fi
-
-  case $ac_option in
-  *=*)	ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
-  *)	ac_optarg=yes ;;
-  esac
-
-  # Accept the important Cygnus configure options, so we can diagnose typos.
-
-  case $ac_dashdash$ac_option in
-  --)
-    ac_dashdash=yes ;;
-
-  -bindir | --bindir | --bindi | --bind | --bin | --bi)
-    ac_prev=bindir ;;
-  -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
-    bindir=$ac_optarg ;;
-
-  -build | --build | --buil | --bui | --bu)
-    ac_prev=build_alias ;;
-  -build=* | --build=* | --buil=* | --bui=* | --bu=*)
-    build_alias=$ac_optarg ;;
-
-  -cache-file | --cache-file | --cache-fil | --cache-fi \
-  | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
-    ac_prev=cache_file ;;
-  -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
-  | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
-    cache_file=$ac_optarg ;;
-
-  --config-cache | -C)
-    cache_file=config.cache ;;
-
-  -datadir | --datadir | --datadi | --datad)
-    ac_prev=datadir ;;
-  -datadir=* | --datadir=* | --datadi=* | --datad=*)
-    datadir=$ac_optarg ;;
-
-  -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
-  | --dataroo | --dataro | --datar)
-    ac_prev=datarootdir ;;
-  -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
-  | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
-    datarootdir=$ac_optarg ;;
-
-  -disable-* | --disable-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid feature name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"enable_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval enable_$ac_useropt=no ;;
-
-  -docdir | --docdir | --docdi | --doc | --do)
-    ac_prev=docdir ;;
-  -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
-    docdir=$ac_optarg ;;
-
-  -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
-    ac_prev=dvidir ;;
-  -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
-    dvidir=$ac_optarg ;;
-
-  -enable-* | --enable-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid feature name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"enable_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval enable_$ac_useropt=\$ac_optarg ;;
-
-  -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
-  | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
-  | --exec | --exe | --ex)
-    ac_prev=exec_prefix ;;
-  -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
-  | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
-  | --exec=* | --exe=* | --ex=*)
-    exec_prefix=$ac_optarg ;;
-
-  -gas | --gas | --ga | --g)
-    # Obsolete; use --with-gas.
-    with_gas=yes ;;
-
-  -help | --help | --hel | --he | -h)
-    ac_init_help=long ;;
-  -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
-    ac_init_help=recursive ;;
-  -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
-    ac_init_help=short ;;
-
-  -host | --host | --hos | --ho)
-    ac_prev=host_alias ;;
-  -host=* | --host=* | --hos=* | --ho=*)
-    host_alias=$ac_optarg ;;
-
-  -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
-    ac_prev=htmldir ;;
-  -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
-  | --ht=*)
-    htmldir=$ac_optarg ;;
-
-  -includedir | --includedir | --includedi | --included | --include \
-  | --includ | --inclu | --incl | --inc)
-    ac_prev=includedir ;;
-  -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
-  | --includ=* | --inclu=* | --incl=* | --inc=*)
-    includedir=$ac_optarg ;;
-
-  -infodir | --infodir | --infodi | --infod | --info | --inf)
-    ac_prev=infodir ;;
-  -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
-    infodir=$ac_optarg ;;
-
-  -libdir | --libdir | --libdi | --libd)
-    ac_prev=libdir ;;
-  -libdir=* | --libdir=* | --libdi=* | --libd=*)
-    libdir=$ac_optarg ;;
-
-  -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
-  | --libexe | --libex | --libe)
-    ac_prev=libexecdir ;;
-  -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
-  | --libexe=* | --libex=* | --libe=*)
-    libexecdir=$ac_optarg ;;
-
-  -localedir | --localedir | --localedi | --localed | --locale)
-    ac_prev=localedir ;;
-  -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
-    localedir=$ac_optarg ;;
-
-  -localstatedir | --localstatedir | --localstatedi | --localstated \
-  | --localstate | --localstat | --localsta | --localst | --locals)
-    ac_prev=localstatedir ;;
-  -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
-  | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
-    localstatedir=$ac_optarg ;;
-
-  -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
-    ac_prev=mandir ;;
-  -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
-    mandir=$ac_optarg ;;
-
-  -nfp | --nfp | --nf)
-    # Obsolete; use --without-fp.
-    with_fp=no ;;
-
-  -no-create | --no-create | --no-creat | --no-crea | --no-cre \
-  | --no-cr | --no-c | -n)
-    no_create=yes ;;
-
-  -no-recursion | --no-recursion | --no-recursio | --no-recursi \
-  | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
-    no_recursion=yes ;;
-
-  -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
-  | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
-  | --oldin | --oldi | --old | --ol | --o)
-    ac_prev=oldincludedir ;;
-  -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
-  | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
-  | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
-    oldincludedir=$ac_optarg ;;
-
-  -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
-    ac_prev=prefix ;;
-  -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
-    prefix=$ac_optarg ;;
-
-  -program-prefix | --program-prefix | --program-prefi | --program-pref \
-  | --program-pre | --program-pr | --program-p)
-    ac_prev=program_prefix ;;
-  -program-prefix=* | --program-prefix=* | --program-prefi=* \
-  | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
-    program_prefix=$ac_optarg ;;
-
-  -program-suffix | --program-suffix | --program-suffi | --program-suff \
-  | --program-suf | --program-su | --program-s)
-    ac_prev=program_suffix ;;
-  -program-suffix=* | --program-suffix=* | --program-suffi=* \
-  | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
-    program_suffix=$ac_optarg ;;
-
-  -program-transform-name | --program-transform-name \
-  | --program-transform-nam | --program-transform-na \
-  | --program-transform-n | --program-transform- \
-  | --program-transform | --program-transfor \
-  | --program-transfo | --program-transf \
-  | --program-trans | --program-tran \
-  | --progr-tra | --program-tr | --program-t)
-    ac_prev=program_transform_name ;;
-  -program-transform-name=* | --program-transform-name=* \
-  | --program-transform-nam=* | --program-transform-na=* \
-  | --program-transform-n=* | --program-transform-=* \
-  | --program-transform=* | --program-transfor=* \
-  | --program-transfo=* | --program-transf=* \
-  | --program-trans=* | --program-tran=* \
-  | --progr-tra=* | --program-tr=* | --program-t=*)
-    program_transform_name=$ac_optarg ;;
-
-  -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
-    ac_prev=pdfdir ;;
-  -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
-    pdfdir=$ac_optarg ;;
-
-  -psdir | --psdir | --psdi | --psd | --ps)
-    ac_prev=psdir ;;
-  -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
-    psdir=$ac_optarg ;;
-
-  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-  | -silent | --silent | --silen | --sile | --sil)
-    silent=yes ;;
-
-  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
-    ac_prev=sbindir ;;
-  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
-  | --sbi=* | --sb=*)
-    sbindir=$ac_optarg ;;
-
-  -sharedstatedir | --sharedstatedir | --sharedstatedi \
-  | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
-  | --sharedst | --shareds | --shared | --share | --shar \
-  | --sha | --sh)
-    ac_prev=sharedstatedir ;;
-  -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
-  | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
-  | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
-  | --sha=* | --sh=*)
-    sharedstatedir=$ac_optarg ;;
-
-  -site | --site | --sit)
-    ac_prev=site ;;
-  -site=* | --site=* | --sit=*)
-    site=$ac_optarg ;;
-
-  -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
-    ac_prev=srcdir ;;
-  -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
-    srcdir=$ac_optarg ;;
-
-  -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
-  | --syscon | --sysco | --sysc | --sys | --sy)
-    ac_prev=sysconfdir ;;
-  -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
-  | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
-    sysconfdir=$ac_optarg ;;
-
-  -target | --target | --targe | --targ | --tar | --ta | --t)
-    ac_prev=target_alias ;;
-  -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
-    target_alias=$ac_optarg ;;
-
-  -v | -verbose | --verbose | --verbos | --verbo | --verb)
-    verbose=yes ;;
-
-  -version | --version | --versio | --versi | --vers | -V)
-    ac_init_version=: ;;
-
-  -with-* | --with-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid package name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"with_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval with_$ac_useropt=\$ac_optarg ;;
-
-  -without-* | --without-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid package name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"with_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval with_$ac_useropt=no ;;
-
-  --x)
-    # Obsolete; use --with-x.
-    with_x=yes ;;
-
-  -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
-  | --x-incl | --x-inc | --x-in | --x-i)
-    ac_prev=x_includes ;;
-  -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
-  | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
-    x_includes=$ac_optarg ;;
-
-  -x-libraries | --x-libraries | --x-librarie | --x-librari \
-  | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
-    ac_prev=x_libraries ;;
-  -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
-  | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
-    x_libraries=$ac_optarg ;;
-
-  -*) as_fn_error "unrecognized option: \`$ac_option'
-Try \`$0 --help' for more information."
-    ;;
-
-  *=*)
-    ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
-    # Reject names that are not valid shell variable names.
-    case $ac_envvar in #(
-      '' | [0-9]* | *[!_$as_cr_alnum]* )
-      as_fn_error "invalid variable name: \`$ac_envvar'" ;;
-    esac
-    eval $ac_envvar=\$ac_optarg
-    export $ac_envvar ;;
-
-  *)
-    # FIXME: should be removed in autoconf 3.0.
-    $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
-    expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
-      $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
-    : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
-    ;;
-
-  esac
-done
-
-if test -n "$ac_prev"; then
-  ac_option=--`echo $ac_prev | sed 's/_/-/g'`
-  as_fn_error "missing argument to $ac_option"
-fi
-
-if test -n "$ac_unrecognized_opts"; then
-  case $enable_option_checking in
-    no) ;;
-    fatal) as_fn_error "unrecognized options: $ac_unrecognized_opts" ;;
-    *)     $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
-  esac
-fi
-
-# Check all directory arguments for consistency.
-for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
-		datadir sysconfdir sharedstatedir localstatedir includedir \
-		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
-		libdir localedir mandir
-do
-  eval ac_val=\$$ac_var
-  # Remove trailing slashes.
-  case $ac_val in
-    */ )
-      ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
-      eval $ac_var=\$ac_val;;
-  esac
-  # Be sure to have absolute directory names.
-  case $ac_val in
-    [\\/$]* | ?:[\\/]* )  continue;;
-    NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
-  esac
-  as_fn_error "expected an absolute directory name for --$ac_var: $ac_val"
-done
-
-# There might be people who depend on the old broken behavior: `$host'
-# used to hold the argument of --host etc.
-# FIXME: To remove some day.
-build=$build_alias
-host=$host_alias
-target=$target_alias
-
-# FIXME: To remove some day.
-if test "x$host_alias" != x; then
-  if test "x$build_alias" = x; then
-    cross_compiling=maybe
-    $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
-    If a cross compiler is detected then cross compile mode will be used." >&2
-  elif test "x$build_alias" != "x$host_alias"; then
-    cross_compiling=yes
-  fi
-fi
-
-ac_tool_prefix=
-test -n "$host_alias" && ac_tool_prefix=$host_alias-
-
-test "$silent" = yes && exec 6>/dev/null
-
-
-ac_pwd=`pwd` && test -n "$ac_pwd" &&
-ac_ls_di=`ls -di .` &&
-ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
-  as_fn_error "working directory cannot be determined"
-test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
-  as_fn_error "pwd does not report name of working directory"
-
-
-# Find the source files, if location was not specified.
-if test -z "$srcdir"; then
-  ac_srcdir_defaulted=yes
-  # Try the directory containing this script, then the parent directory.
-  ac_confdir=`$as_dirname -- "$as_myself" ||
-$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$as_myself" : 'X\(//\)[^/]' \| \
-	 X"$as_myself" : 'X\(//\)$' \| \
-	 X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$as_myself" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-  srcdir=$ac_confdir
-  if test ! -r "$srcdir/$ac_unique_file"; then
-    srcdir=..
-  fi
-else
-  ac_srcdir_defaulted=no
-fi
-if test ! -r "$srcdir/$ac_unique_file"; then
-  test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
-  as_fn_error "cannot find sources ($ac_unique_file) in $srcdir"
-fi
-ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
-ac_abs_confdir=`(
-	cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error "$ac_msg"
-	pwd)`
-# When building in place, set srcdir=.
-if test "$ac_abs_confdir" = "$ac_pwd"; then
-  srcdir=.
-fi
-# Remove unnecessary trailing slashes from srcdir.
-# Double slashes in file names in object file debugging info
-# mess up M-x gdb in Emacs.
-case $srcdir in
-*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
-esac
-for ac_var in $ac_precious_vars; do
-  eval ac_env_${ac_var}_set=\${${ac_var}+set}
-  eval ac_env_${ac_var}_value=\$${ac_var}
-  eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
-  eval ac_cv_env_${ac_var}_value=\$${ac_var}
-done
-
-#
-# Report the --help message.
-#
-if test "$ac_init_help" = "long"; then
-  # Omit some internal or obsolete options to make the list less imposing.
-  # This message is too long to be a string in the A/UX 3.1 sh.
-  cat <<_ACEOF
-\`configure' configures runAs 0.1 to adapt to many kinds of systems.
-
-Usage: $0 [OPTION]... [VAR=VALUE]...
-
-To assign environment variables (e.g., CC, CFLAGS...), specify them as
-VAR=VALUE.  See below for descriptions of some of the useful variables.
-
-Defaults for the options are specified in brackets.
-
-Configuration:
-  -h, --help              display this help and exit
-      --help=short        display options specific to this package
-      --help=recursive    display the short help of all the included packages
-  -V, --version           display version information and exit
-  -q, --quiet, --silent   do not print \`checking...' messages
-      --cache-file=FILE   cache test results in FILE [disabled]
-  -C, --config-cache      alias for \`--cache-file=config.cache'
-  -n, --no-create         do not create output files
-      --srcdir=DIR        find the sources in DIR [configure dir or \`..']
-
-Installation directories:
-  --prefix=PREFIX         install architecture-independent files in PREFIX
-                          [$ac_default_prefix]
-  --exec-prefix=EPREFIX   install architecture-dependent files in EPREFIX
-                          [PREFIX]
-
-By default, \`make install' will install all the files in
-\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc.  You can specify
-an installation prefix other than \`$ac_default_prefix' using \`--prefix',
-for instance \`--prefix=\$HOME'.
-
-For better control, use the options below.
-
-Fine tuning of the installation directories:
-  --bindir=DIR            user executables [EPREFIX/bin]
-  --sbindir=DIR           system admin executables [EPREFIX/sbin]
-  --libexecdir=DIR        program executables [EPREFIX/libexec]
-  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
-  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
-  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
-  --libdir=DIR            object code libraries [EPREFIX/lib]
-  --includedir=DIR        C header files [PREFIX/include]
-  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
-  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
-  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
-  --infodir=DIR           info documentation [DATAROOTDIR/info]
-  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
-  --mandir=DIR            man documentation [DATAROOTDIR/man]
-  --docdir=DIR            documentation root [DATAROOTDIR/doc/runas]
-  --htmldir=DIR           html documentation [DOCDIR]
-  --dvidir=DIR            dvi documentation [DOCDIR]
-  --pdfdir=DIR            pdf documentation [DOCDIR]
-  --psdir=DIR             ps documentation [DOCDIR]
-_ACEOF
-
-  cat <<\_ACEOF
-_ACEOF
-fi
-
-if test -n "$ac_init_help"; then
-  case $ac_init_help in
-     short | recursive ) echo "Configuration of runAs 0.1:";;
-   esac
-  cat <<\_ACEOF
-
-Optional Packages:
-  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
-  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
---with-home path to hadoop home dir
-
-Some influential environment variables:
-  CC          C compiler command
-  CFLAGS      C compiler flags
-  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
-              nonstandard directory <lib dir>
-  LIBS        libraries to pass to the linker, e.g. -l<library>
-  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
-              you have headers in a nonstandard directory <include dir>
-  CPP         C preprocessor
-
-Use these variables to override the choices made by `configure' or to help
-it to find libraries and programs with nonstandard names/locations.
-
-Report bugs to the package provider.
-_ACEOF
-ac_status=$?
-fi
-
-if test "$ac_init_help" = "recursive"; then
-  # If there are subdirs, report their specific --help.
-  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
-    test -d "$ac_dir" ||
-      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
-      continue
-    ac_builddir=.
-
-case "$ac_dir" in
-.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
-*)
-  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
-  # A ".." for each directory in $ac_dir_suffix.
-  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
-  case $ac_top_builddir_sub in
-  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
-  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
-  esac ;;
-esac
-ac_abs_top_builddir=$ac_pwd
-ac_abs_builddir=$ac_pwd$ac_dir_suffix
-# for backward compatibility:
-ac_top_builddir=$ac_top_build_prefix
-
-case $srcdir in
-  .)  # We are building in place.
-    ac_srcdir=.
-    ac_top_srcdir=$ac_top_builddir_sub
-    ac_abs_top_srcdir=$ac_pwd ;;
-  [\\/]* | ?:[\\/]* )  # Absolute name.
-    ac_srcdir=$srcdir$ac_dir_suffix;
-    ac_top_srcdir=$srcdir
-    ac_abs_top_srcdir=$srcdir ;;
-  *) # Relative name.
-    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
-    ac_top_srcdir=$ac_top_build_prefix$srcdir
-    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
-esac
-ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
-
-    cd "$ac_dir" || { ac_status=$?; continue; }
-    # Check for guested configure.
-    if test -f "$ac_srcdir/configure.gnu"; then
-      echo &&
-      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
-    elif test -f "$ac_srcdir/configure"; then
-      echo &&
-      $SHELL "$ac_srcdir/configure" --help=recursive
-    else
-      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
-    fi || ac_status=$?
-    cd "$ac_pwd" || { ac_status=$?; break; }
-  done
-fi
-
-test -n "$ac_init_help" && exit $ac_status
-if $ac_init_version; then
-  cat <<\_ACEOF
-runAs configure 0.1
-generated by GNU Autoconf 2.65
-
-Copyright (C) 2009 Free Software Foundation, Inc.
-This configure script is free software; the Free Software Foundation
-gives unlimited permission to copy, distribute and modify it.
-_ACEOF
-  exit
-fi
-
-## ------------------------ ##
-## Autoconf initialization. ##
-## ------------------------ ##
-
-# ac_fn_c_try_compile LINENO
-# --------------------------
-# Try to compile conftest.$ac_ext, and return whether this succeeded.
-ac_fn_c_try_compile ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  rm -f conftest.$ac_objext
-  if { { ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_compile") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_retval=1
-fi
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_compile
-
-# ac_fn_c_try_cpp LINENO
-# ----------------------
-# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
-ac_fn_c_try_cpp ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  if { { ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-    ac_retval=1
-fi
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_cpp
-
-# ac_fn_c_try_run LINENO
-# ----------------------
-# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
-# that executables *can* be run.
-ac_fn_c_try_run ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
-  { { case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_try") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: program exited with status $ac_status" >&5
-       $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-       ac_retval=$ac_status
-fi
-  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_run
-
-# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
-# -------------------------------------------------------
-# Tests whether HEADER exists, giving a warning if it cannot be compiled using
-# the include files in INCLUDES and setting the cache variable VAR
-# accordingly.
-ac_fn_c_check_header_mongrel ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-else
-  # Is the header compilable?
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
-$as_echo_n "checking $2 usability... " >&6; }
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-#include <$2>
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_header_compiler=yes
-else
-  ac_header_compiler=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
-$as_echo "$ac_header_compiler" >&6; }
-
-# Is the header present?
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
-$as_echo_n "checking $2 presence... " >&6; }
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <$2>
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-  ac_header_preproc=yes
-else
-  ac_header_preproc=no
-fi
-rm -f conftest.err conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
-$as_echo "$ac_header_preproc" >&6; }
-
-# So?  What about this header?
-case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #((
-  yes:no: )
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
-$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
-$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
-    ;;
-  no:yes:* )
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
-$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     check for missing prerequisite headers?" >&5
-$as_echo "$as_me: WARNING: $2:     check for missing prerequisite headers?" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
-$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&5
-$as_echo "$as_me: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
-$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
-    ;;
-esac
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  eval "$3=\$ac_header_compiler"
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-fi
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_header_mongrel
-
-# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
-# -------------------------------------------------------
-# Tests whether HEADER exists and can be compiled using the include files in
-# INCLUDES, setting the cache variable VAR accordingly.
-ac_fn_c_check_header_compile ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-#include <$2>
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  eval "$3=yes"
-else
-  eval "$3=no"
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_header_compile
-
-# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
-# -------------------------------------------
-# Tests whether TYPE exists after having included INCLUDES, setting cache
-# variable VAR accordingly.
-ac_fn_c_check_type ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  eval "$3=no"
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-int
-main ()
-{
-if (sizeof ($2))
-	 return 0;
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-int
-main ()
-{
-if (sizeof (($2)))
-	    return 0;
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-
-else
-  eval "$3=yes"
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_type
-
-# ac_fn_c_try_link LINENO
-# -----------------------
-# Try to link conftest.$ac_ext, and return whether this succeeded.
-ac_fn_c_try_link ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  rm -f conftest.$ac_objext conftest$ac_exeext
-  if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest$ac_exeext && {
-	 test "$cross_compiling" = yes ||
-	 $as_test_x conftest$ac_exeext
-       }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_retval=1
-fi
-  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
-  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
-  # interfere with the next link command; also delete a directory that is
-  # left behind by Apple's compiler.  We do this before executing the actions.
-  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_link
-
-# ac_fn_c_check_func LINENO FUNC VAR
-# ----------------------------------
-# Tests whether FUNC exists, setting the cache variable VAR accordingly
-ac_fn_c_check_func ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
-   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
-#define $2 innocuous_$2
-
-/* System header to define __stub macros and hopefully few prototypes,
-    which can conflict with char $2 (); below.
-    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-    <limits.h> exists even on freestanding compilers.  */
-
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-
-#undef $2
-
-/* Override any GCC internal prototype to avoid an error.
-   Use char because int might match the return type of a GCC
-   builtin and then its argument prototype would still apply.  */
-#ifdef __cplusplus
-extern "C"
-#endif
-char $2 ();
-/* The GNU C library defines this for functions which it implements
-    to always fail with ENOSYS.  Some functions are actually named
-    something starting with __ and the normal name is an alias.  */
-#if defined __stub_$2 || defined __stub___$2
-choke me
-#endif
-
-int
-main ()
-{
-return $2 ();
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
-  eval "$3=yes"
-else
-  eval "$3=no"
-fi
-rm -f core conftest.err conftest.$ac_objext \
-    conftest$ac_exeext conftest.$ac_ext
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_func
-cat >config.log <<_ACEOF
-This file contains any messages produced by compilers while
-running configure, to aid debugging if configure makes a mistake.
-
-It was created by runAs $as_me 0.1, which was
-generated by GNU Autoconf 2.65.  Invocation command line was
-
-  $ $0 $@
-
-_ACEOF
-exec 5>>config.log
-{
-cat <<_ASUNAME
-## --------- ##
-## Platform. ##
-## --------- ##
-
-hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
-uname -m = `(uname -m) 2>/dev/null || echo unknown`
-uname -r = `(uname -r) 2>/dev/null || echo unknown`
-uname -s = `(uname -s) 2>/dev/null || echo unknown`
-uname -v = `(uname -v) 2>/dev/null || echo unknown`
-
-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
-/bin/uname -X     = `(/bin/uname -X) 2>/dev/null     || echo unknown`
-
-/bin/arch              = `(/bin/arch) 2>/dev/null              || echo unknown`
-/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null       || echo unknown`
-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
-/usr/bin/hostinfo      = `(/usr/bin/hostinfo) 2>/dev/null      || echo unknown`
-/bin/machine           = `(/bin/machine) 2>/dev/null           || echo unknown`
-/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null       || echo unknown`
-/bin/universe          = `(/bin/universe) 2>/dev/null          || echo unknown`
-
-_ASUNAME
-
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    $as_echo "PATH: $as_dir"
-  done
-IFS=$as_save_IFS
-
-} >&5
-
-cat >&5 <<_ACEOF
-
-
-## ----------- ##
-## Core tests. ##
-## ----------- ##
-
-_ACEOF
-
-
-# Keep a trace of the command line.
-# Strip out --no-create and --no-recursion so they do not pile up.
-# Strip out --silent because we don't want to record it for future runs.
-# Also quote any args containing shell meta-characters.
-# Make two passes to allow for proper duplicate-argument suppression.
-ac_configure_args=
-ac_configure_args0=
-ac_configure_args1=
-ac_must_keep_next=false
-for ac_pass in 1 2
-do
-  for ac_arg
-  do
-    case $ac_arg in
-    -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
-    -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-    | -silent | --silent | --silen | --sile | --sil)
-      continue ;;
-    *\'*)
-      ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
-    esac
-    case $ac_pass in
-    1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
-    2)
-      as_fn_append ac_configure_args1 " '$ac_arg'"
-      if test $ac_must_keep_next = true; then
-	ac_must_keep_next=false # Got value, back to normal.
-      else
-	case $ac_arg in
-	  *=* | --config-cache | -C | -disable-* | --disable-* \
-	  | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
-	  | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
-	  | -with-* | --with-* | -without-* | --without-* | --x)
-	    case "$ac_configure_args0 " in
-	      "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
-	    esac
-	    ;;
-	  -* ) ac_must_keep_next=true ;;
-	esac
-      fi
-      as_fn_append ac_configure_args " '$ac_arg'"
-      ;;
-    esac
-  done
-done
-{ ac_configure_args0=; unset ac_configure_args0;}
-{ ac_configure_args1=; unset ac_configure_args1;}
-
-# When interrupted or exit'd, cleanup temporary files, and complete
-# config.log.  We remove comments because anyway the quotes in there
-# would cause problems or look ugly.
-# WARNING: Use '\'' to represent an apostrophe within the trap.
-# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
-trap 'exit_status=$?
-  # Save into config.log some information that might help in debugging.
-  {
-    echo
-
-    cat <<\_ASBOX
-## ---------------- ##
-## Cache variables. ##
-## ---------------- ##
-_ASBOX
-    echo
-    # The following way of writing the cache mishandles newlines in values,
-(
-  for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
-    eval ac_val=\$$ac_var
-    case $ac_val in #(
-    *${as_nl}*)
-      case $ac_var in #(
-      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
-$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
-      esac
-      case $ac_var in #(
-      _ | IFS | as_nl) ;; #(
-      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
-      *) { eval $ac_var=; unset $ac_var;} ;;
-      esac ;;
-    esac
-  done
-  (set) 2>&1 |
-    case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
-    *${as_nl}ac_space=\ *)
-      sed -n \
-	"s/'\''/'\''\\\\'\'''\''/g;
-	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
-      ;; #(
-    *)
-      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
-      ;;
-    esac |
-    sort
-)
-    echo
-
-    cat <<\_ASBOX
-## ----------------- ##
-## Output variables. ##
-## ----------------- ##
-_ASBOX
-    echo
-    for ac_var in $ac_subst_vars
-    do
-      eval ac_val=\$$ac_var
-      case $ac_val in
-      *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
-      esac
-      $as_echo "$ac_var='\''$ac_val'\''"
-    done | sort
-    echo
-
-    if test -n "$ac_subst_files"; then
-      cat <<\_ASBOX
-## ------------------- ##
-## File substitutions. ##
-## ------------------- ##
-_ASBOX
-      echo
-      for ac_var in $ac_subst_files
-      do
-	eval ac_val=\$$ac_var
-	case $ac_val in
-	*\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
-	esac
-	$as_echo "$ac_var='\''$ac_val'\''"
-      done | sort
-      echo
-    fi
-
-    if test -s confdefs.h; then
-      cat <<\_ASBOX
-## ----------- ##
-## confdefs.h. ##
-## ----------- ##
-_ASBOX
-      echo
-      cat confdefs.h
-      echo
-    fi
-    test "$ac_signal" != 0 &&
-      $as_echo "$as_me: caught signal $ac_signal"
-    $as_echo "$as_me: exit $exit_status"
-  } >&5
-  rm -f core *.core core.conftest.* &&
-    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
-    exit $exit_status
-' 0
-for ac_signal in 1 2 13 15; do
-  trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
-done
-ac_signal=0
-
-# confdefs.h avoids OS command line length limits that DEFS can exceed.
-rm -f -r conftest* confdefs.h
-
-$as_echo "/* confdefs.h */" > confdefs.h
-
-# Predefined preprocessor variables.
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_NAME "$PACKAGE_NAME"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_VERSION "$PACKAGE_VERSION"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_STRING "$PACKAGE_STRING"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_URL "$PACKAGE_URL"
-_ACEOF
-
-
-# Let the site file select an alternate cache file if it wants to.
-# Prefer an explicitly selected file to automatically selected ones.
-ac_site_file1=NONE
-ac_site_file2=NONE
-if test -n "$CONFIG_SITE"; then
-  ac_site_file1=$CONFIG_SITE
-elif test "x$prefix" != xNONE; then
-  ac_site_file1=$prefix/share/config.site
-  ac_site_file2=$prefix/etc/config.site
-else
-  ac_site_file1=$ac_default_prefix/share/config.site
-  ac_site_file2=$ac_default_prefix/etc/config.site
-fi
-for ac_site_file in "$ac_site_file1" "$ac_site_file2"
-do
-  test "x$ac_site_file" = xNONE && continue
-  if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
-    { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
-$as_echo "$as_me: loading site script $ac_site_file" >&6;}
-    sed 's/^/| /' "$ac_site_file" >&5
-    . "$ac_site_file"
-  fi
-done
-
-if test -r "$cache_file"; then
-  # Some versions of bash will fail to source /dev/null (special files
-  # actually), so we avoid doing that.  DJGPP emulates it as a regular file.
-  if test /dev/null != "$cache_file" && test -f "$cache_file"; then
-    { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
-$as_echo "$as_me: loading cache $cache_file" >&6;}
-    case $cache_file in
-      [\\/]* | ?:[\\/]* ) . "$cache_file";;
-      *)                      . "./$cache_file";;
-    esac
-  fi
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
-$as_echo "$as_me: creating cache $cache_file" >&6;}
-  >$cache_file
-fi
-
-# Check that the precious variables saved in the cache have kept the same
-# value.
-ac_cache_corrupted=false
-for ac_var in $ac_precious_vars; do
-  eval ac_old_set=\$ac_cv_env_${ac_var}_set
-  eval ac_new_set=\$ac_env_${ac_var}_set
-  eval ac_old_val=\$ac_cv_env_${ac_var}_value
-  eval ac_new_val=\$ac_env_${ac_var}_value
-  case $ac_old_set,$ac_new_set in
-    set,)
-      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
-$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
-      ac_cache_corrupted=: ;;
-    ,set)
-      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
-$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
-      ac_cache_corrupted=: ;;
-    ,);;
-    *)
-      if test "x$ac_old_val" != "x$ac_new_val"; then
-	# differences in whitespace do not lead to failure.
-	ac_old_val_w=`echo x $ac_old_val`
-	ac_new_val_w=`echo x $ac_new_val`
-	if test "$ac_old_val_w" != "$ac_new_val_w"; then
-	  { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
-$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
-	  ac_cache_corrupted=:
-	else
-	  { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
-$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
-	  eval $ac_var=\$ac_old_val
-	fi
-	{ $as_echo "$as_me:${as_lineno-$LINENO}:   former value:  \`$ac_old_val'" >&5
-$as_echo "$as_me:   former value:  \`$ac_old_val'" >&2;}
-	{ $as_echo "$as_me:${as_lineno-$LINENO}:   current value: \`$ac_new_val'" >&5
-$as_echo "$as_me:   current value: \`$ac_new_val'" >&2;}
-      fi;;
-  esac
-  # Pass precious variables to config.status.
-  if test "$ac_new_set" = set; then
-    case $ac_new_val in
-    *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
-    *) ac_arg=$ac_var=$ac_new_val ;;
-    esac
-    case " $ac_configure_args " in
-      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
-      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
-    esac
-  fi
-done
-if $ac_cache_corrupted; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-  { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
-$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
-  as_fn_error "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
-fi
-## -------------------- ##
-## Main body of script. ##
-## -------------------- ##
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-
-#changing default prefix value to empty string, so that the binary does not
-#get installed within the system
-
-
-#add new arguments --with-home
-
-# Check whether --with-home was given.
-if test "${with_home+set}" = set; then :
-  withval=$with_home;
-fi
-
-
-ac_config_headers="$ac_config_headers runAs.h"
-
-
-# Checks for programs.
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-if test -n "$ac_tool_prefix"; then
-  # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
-set dummy ${ac_tool_prefix}gcc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="${ac_tool_prefix}gcc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-fi
-if test -z "$ac_cv_prog_CC"; then
-  ac_ct_CC=$CC
-  # Extract the first word of "gcc", so it can be a program name with args.
-set dummy gcc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$ac_ct_CC"; then
-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_ac_ct_CC="gcc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-ac_ct_CC=$ac_cv_prog_ac_ct_CC
-if test -n "$ac_ct_CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
-$as_echo "$ac_ct_CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-  if test "x$ac_ct_CC" = x; then
-    CC=""
-  else
-    case $cross_compiling:$ac_tool_warned in
-yes:)
-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
-ac_tool_warned=yes ;;
-esac
-    CC=$ac_ct_CC
-  fi
-else
-  CC="$ac_cv_prog_CC"
-fi
-
-if test -z "$CC"; then
-          if test -n "$ac_tool_prefix"; then
-    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
-set dummy ${ac_tool_prefix}cc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="${ac_tool_prefix}cc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-  fi
-fi
-if test -z "$CC"; then
-  # Extract the first word of "cc", so it can be a program name with args.
-set dummy cc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-  ac_prog_rejected=no
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
-       ac_prog_rejected=yes
-       continue
-     fi
-    ac_cv_prog_CC="cc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-if test $ac_prog_rejected = yes; then
-  # We found a bogon in the path, so make sure we never use it.
-  set dummy $ac_cv_prog_CC
-  shift
-  if test $# != 0; then
-    # We chose a different compiler from the bogus one.
-    # However, it has the same basename, so the bogon will be chosen
-    # first if we set CC to just the basename; use the full file name.
-    shift
-    ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
-  fi
-fi
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-fi
-if test -z "$CC"; then
-  if test -n "$ac_tool_prefix"; then
-  for ac_prog in cl.exe
-  do
-    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
-set dummy $ac_tool_prefix$ac_prog; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-    test -n "$CC" && break
-  done
-fi
-if test -z "$CC"; then
-  ac_ct_CC=$CC
-  for ac_prog in cl.exe
-do
-  # Extract the first word of "$ac_prog", so it can be a program name with args.
-set dummy $ac_prog; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$ac_ct_CC"; then
-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_ac_ct_CC="$ac_prog"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-ac_ct_CC=$ac_cv_prog_ac_ct_CC
-if test -n "$ac_ct_CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
-$as_echo "$ac_ct_CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-  test -n "$ac_ct_CC" && break
-done
-
-  if test "x$ac_ct_CC" = x; then
-    CC=""
-  else
-    case $cross_compiling:$ac_tool_warned in
-yes:)
-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
-ac_tool_warned=yes ;;
-esac
-    CC=$ac_ct_CC
-  fi
-fi
-
-fi
-
-
-test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "no acceptable C compiler found in \$PATH
-See \`config.log' for more details." "$LINENO" 5; }
-
-# Provide some information about the compiler.
-$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
-set X $ac_compile
-ac_compiler=$2
-for ac_option in --version -v -V -qversion; do
-  { { ac_try="$ac_compiler $ac_option >&5"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    sed '10a\
-... rest of stderr output deleted ...
-         10q' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-  fi
-  rm -f conftest.er1 conftest.err
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }
-done
-
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-ac_clean_files_save=$ac_clean_files
-ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
-# Try to create an executable without -o first, disregard a.out.
-# It will help us diagnose broken compilers, and finding out an intuition
-# of exeext.
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
-$as_echo_n "checking whether the C compiler works... " >&6; }
-ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
-
-# The possible output files:
-ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
-
-ac_rmfiles=
-for ac_file in $ac_files
-do
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
-    * ) ac_rmfiles="$ac_rmfiles $ac_file";;
-  esac
-done
-rm -f $ac_rmfiles
-
-if { { ac_try="$ac_link_default"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link_default") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; then :
-  # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
-# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
-# in a Makefile.  We should not override ac_cv_exeext if it was cached,
-# so that the user can short-circuit this test for compilers unknown to
-# Autoconf.
-for ac_file in $ac_files ''
-do
-  test -f "$ac_file" || continue
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
-	;;
-    [ab].out )
-	# We found the default executable, but exeext='' is most
-	# certainly right.
-	break;;
-    *.* )
-	if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
-	then :; else
-	   ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
-	fi
-	# We set ac_cv_exeext here because the later test for it is not
-	# safe: cross compilers may not add the suffix if given an `-o'
-	# argument, so we may need to know it at that point already.
-	# Even if this section looks crufty: it has the advantage of
-	# actually working.
-	break;;
-    * )
-	break;;
-  esac
-done
-test "$ac_cv_exeext" = no && ac_cv_exeext=
-
-else
-  ac_file=''
-fi
-if test -z "$ac_file"; then :
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-$as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-{ as_fn_set_status 77
-as_fn_error "C compiler cannot create executables
-See \`config.log' for more details." "$LINENO" 5; }; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
-$as_echo_n "checking for C compiler default output file name... " >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
-$as_echo "$ac_file" >&6; }
-ac_exeext=$ac_cv_exeext
-
-rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
-ac_clean_files=$ac_clean_files_save
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
-$as_echo_n "checking for suffix of executables... " >&6; }
-if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; then :
-  # If both `conftest.exe' and `conftest' are `present' (well, observable)
-# catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
-# work properly (i.e., refer to `conftest.exe'), while it won't with
-# `rm'.
-for ac_file in conftest.exe conftest conftest.*; do
-  test -f "$ac_file" || continue
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
-    *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
-	  break;;
-    * ) break;;
-  esac
-done
-else
-  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot compute suffix of executables: cannot compile and link
-See \`config.log' for more details." "$LINENO" 5; }
-fi
-rm -f conftest conftest$ac_cv_exeext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
-$as_echo "$ac_cv_exeext" >&6; }
-
-rm -f conftest.$ac_ext
-EXEEXT=$ac_cv_exeext
-ac_exeext=$EXEEXT
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdio.h>
-int
-main ()
-{
-FILE *f = fopen ("conftest.out", "w");
- return ferror (f) || fclose (f) != 0;
-
-  ;
-  return 0;
-}
-_ACEOF
-ac_clean_files="$ac_clean_files conftest.out"
-# Check that the compiler produces executables we can run.  If not, either
-# the compiler is broken, or we cross compile.
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
-$as_echo_n "checking whether we are cross compiling... " >&6; }
-if test "$cross_compiling" != yes; then
-  { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }
-  if { ac_try='./conftest$ac_cv_exeext'
-  { { case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_try") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; }; then
-    cross_compiling=no
-  else
-    if test "$cross_compiling" = maybe; then
-	cross_compiling=yes
-    else
-	{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot run C compiled programs.
-If you meant to cross compile, use \`--host'.
-See \`config.log' for more details." "$LINENO" 5; }
-    fi
-  fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
-$as_echo "$cross_compiling" >&6; }
-
-rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
-ac_clean_files=$ac_clean_files_save
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
-$as_echo_n "checking for suffix of object files... " >&6; }
-if test "${ac_cv_objext+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.o conftest.obj
-if { { ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_compile") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; then :
-  for ac_file in conftest.o conftest.obj conftest.*; do
-  test -f "$ac_file" || continue;
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
-    *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
-       break;;
-  esac
-done
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot compute suffix of object files: cannot compile
-See \`config.log' for more details." "$LINENO" 5; }
-fi
-rm -f conftest.$ac_cv_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
-$as_echo "$ac_cv_objext" >&6; }
-OBJEXT=$ac_cv_objext
-ac_objext=$OBJEXT
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
-$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
-if test "${ac_cv_c_compiler_gnu+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-#ifndef __GNUC__
-       choke me
-#endif
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_compiler_gnu=yes
-else
-  ac_compiler_gnu=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-ac_cv_c_compiler_gnu=$ac_compiler_gnu
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
-$as_echo "$ac_cv_c_compiler_gnu" >&6; }
-if test $ac_compiler_gnu = yes; then
-  GCC=yes
-else
-  GCC=
-fi
-ac_test_CFLAGS=${CFLAGS+set}
-ac_save_CFLAGS=$CFLAGS
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
-$as_echo_n "checking whether $CC accepts -g... " >&6; }
-if test "${ac_cv_prog_cc_g+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  ac_save_c_werror_flag=$ac_c_werror_flag
-   ac_c_werror_flag=yes
-   ac_cv_prog_cc_g=no
-   CFLAGS="-g"
-   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_prog_cc_g=yes
-else
-  CFLAGS=""
-      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-
-else
-  ac_c_werror_flag=$ac_save_c_werror_flag
-	 CFLAGS="-g"
-	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_prog_cc_g=yes
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-   ac_c_werror_flag=$ac_save_c_werror_flag
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
-$as_echo "$ac_cv_prog_cc_g" >&6; }
-if test "$ac_test_CFLAGS" = set; then
-  CFLAGS=$ac_save_CFLAGS
-elif test $ac_cv_prog_cc_g = yes; then
-  if test "$GCC" = yes; then
-    CFLAGS="-g -O2"
-  else
-    CFLAGS="-g"
-  fi
-else
-  if test "$GCC" = yes; then
-    CFLAGS="-O2"
-  else
-    CFLAGS=
-  fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
-$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
-if test "${ac_cv_prog_cc_c89+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  ac_cv_prog_cc_c89=no
-ac_save_CC=$CC
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdarg.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-/* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
-struct buf { int x; };
-FILE * (*rcsopen) (struct buf *, struct stat *, int);
-static char *e (p, i)
-     char **p;
-     int i;
-{
-  return p[i];
-}
-static char *f (char * (*g) (char **, int), char **p, ...)
-{
-  char *s;
-  va_list v;
-  va_start (v,p);
-  s = g (p, va_arg (v,int));
-  va_end (v);
-  return s;
-}
-
-/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
-   function prototypes and stuff, but not '\xHH' hex character constants.
-   These don't provoke an error unfortunately, instead are silently treated
-   as 'x'.  The following induces an error, until -std is added to get
-   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
-   array size at least.  It's necessary to write '\x00'==0 to get something
-   that's true only with -std.  */
-int osf4_cc_array ['\x00' == 0 ? 1 : -1];
-
-/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
-   inside strings and character constants.  */
-#define FOO(x) 'x'
-int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
-
-int test (int i, double x);
-struct s1 {int (*f) (int a);};
-struct s2 {int (*f) (double a);};
-int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
-int argc;
-char **argv;
-int
-main ()
-{
-return f (e, argv, 0) != argv[0]  ||  f (e, argv, 1) != argv[1];
-  ;
-  return 0;
-}
-_ACEOF
-for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
-	-Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
-do
-  CC="$ac_save_CC $ac_arg"
-  if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_prog_cc_c89=$ac_arg
-fi
-rm -f core conftest.err conftest.$ac_objext
-  test "x$ac_cv_prog_cc_c89" != "xno" && break
-done
-rm -f conftest.$ac_ext
-CC=$ac_save_CC
-
-fi
-# AC_CACHE_VAL
-case "x$ac_cv_prog_cc_c89" in
-  x)
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
-$as_echo "none needed" >&6; } ;;
-  xno)
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
-$as_echo "unsupported" >&6; } ;;
-  *)
-    CC="$CC $ac_cv_prog_cc_c89"
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
-$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
-esac
-if test "x$ac_cv_prog_cc_c89" != xno; then :
-
-fi
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-# Checks for libraries.
-
-# Checks for header files.
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
-$as_echo_n "checking how to run the C preprocessor... " >&6; }
-# On Suns, sometimes $CPP names a directory.
-if test -n "$CPP" && test -d "$CPP"; then
-  CPP=
-fi
-if test -z "$CPP"; then
-  if test "${ac_cv_prog_CPP+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-      # Double quotes because CPP needs to be expanded
-    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
-    do
-      ac_preproc_ok=false
-for ac_c_preproc_warn_flag in '' yes
-do
-  # Use a header file that comes with gcc, so configuring glibc
-  # with a fresh cross-compiler works.
-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-  # <limits.h> exists even on freestanding compilers.
-  # On the NeXT, cc -E runs the code through the compiler's parser,
-  # not just through cpp. "Syntax error" is here to catch this case.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-		     Syntax error
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-
-else
-  # Broken: fails on valid input.
-continue
-fi
-rm -f conftest.err conftest.$ac_ext
-
-  # OK, works on sane cases.  Now check whether nonexistent headers
-  # can be detected and how.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <ac_nonexistent.h>
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-  # Broken: success on invalid input.
-continue
-else
-  # Passes both tests.
-ac_preproc_ok=:
-break
-fi
-rm -f conftest.err conftest.$ac_ext
-
-done
-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
-if $ac_preproc_ok; then :
-  break
-fi
-
-    done
-    ac_cv_prog_CPP=$CPP
-
-fi
-  CPP=$ac_cv_prog_CPP
-else
-  ac_cv_prog_CPP=$CPP
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
-$as_echo "$CPP" >&6; }
-ac_preproc_ok=false
-for ac_c_preproc_warn_flag in '' yes
-do
-  # Use a header file that comes with gcc, so configuring glibc
-  # with a fresh cross-compiler works.
-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-  # <limits.h> exists even on freestanding compilers.
-  # On the NeXT, cc -E runs the code through the compiler's parser,
-  # not just through cpp. "Syntax error" is here to catch this case.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-		     Syntax error
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-
-else
-  # Broken: fails on valid input.
-continue
-fi
-rm -f conftest.err conftest.$ac_ext
-
-  # OK, works on sane cases.  Now check whether nonexistent headers
-  # can be detected and how.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <ac_nonexistent.h>
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-  # Broken: success on invalid input.
-continue
-else
-  # Passes both tests.
-ac_preproc_ok=:
-break
-fi
-rm -f conftest.err conftest.$ac_ext
-
-done
-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
-if $ac_preproc_ok; then :
-
-else
-  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "C preprocessor \"$CPP\" fails sanity check
-See \`config.log' for more details." "$LINENO" 5; }
-fi
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
-$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
-if test "${ac_cv_path_GREP+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -z "$GREP"; then
-  ac_path_GREP_found=false
-  # Loop through the user's path and test for each of PROGNAME-LIST
-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_prog in grep ggrep; do
-    for ac_exec_ext in '' $ac_executable_extensions; do
-      ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
-      { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
-# Check for GNU ac_path_GREP and select it if it is found.
-  # Check for GNU $ac_path_GREP
-case `"$ac_path_GREP" --version 2>&1` in
-*GNU*)
-  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
-*)
-  ac_count=0
-  $as_echo_n 0123456789 >"conftest.in"
-  while :
-  do
-    cat "conftest.in" "conftest.in" >"conftest.tmp"
-    mv "conftest.tmp" "conftest.in"
-    cp "conftest.in" "conftest.nl"
-    $as_echo 'GREP' >> "conftest.nl"
-    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
-    as_fn_arith $ac_count + 1 && ac_count=$as_val
-    if test $ac_count -gt ${ac_path_GREP_max-0}; then
-      # Best one so far, save it but keep looking for a better one
-      ac_cv_path_GREP="$ac_path_GREP"
-      ac_path_GREP_max=$ac_count
-    fi
-    # 10*(2^10) chars as input seems more than enough
-    test $ac_count -gt 10 && break
-  done
-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
-esac
-
-      $ac_path_GREP_found && break 3
-    done
-  done
-  done
-IFS=$as_save_IFS
-  if test -z "$ac_cv_path_GREP"; then
-    as_fn_error "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
-  fi
-else
-  ac_cv_path_GREP=$GREP
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
-$as_echo "$ac_cv_path_GREP" >&6; }
- GREP="$ac_cv_path_GREP"
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
-$as_echo_n "checking for egrep... " >&6; }
-if test "${ac_cv_path_EGREP+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
-   then ac_cv_path_EGREP="$GREP -E"
-   else
-     if test -z "$EGREP"; then
-  ac_path_EGREP_found=false
-  # Loop through the user's path and test for each of PROGNAME-LIST
-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_prog in egrep; do
-    for ac_exec_ext in '' $ac_executable_extensions; do
-      ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
-      { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
-# Check for GNU ac_path_EGREP and select it if it is found.
-  # Check for GNU $ac_path_EGREP
-case `"$ac_path_EGREP" --version 2>&1` in
-*GNU*)
-  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
-*)
-  ac_count=0
-  $as_echo_n 0123456789 >"conftest.in"
-  while :
-  do
-    cat "conftest.in" "conftest.in" >"conftest.tmp"
-    mv "conftest.tmp" "conftest.in"
-    cp "conftest.in" "conftest.nl"
-    $as_echo 'EGREP' >> "conftest.nl"
-    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
-    as_fn_arith $ac_count + 1 && ac_count=$as_val
-    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
-      # Best one so far, save it but keep looking for a better one
-      ac_cv_path_EGREP="$ac_path_EGREP"
-      ac_path_EGREP_max=$ac_count
-    fi
-    # 10*(2^10) chars as input seems more than enough
-    test $ac_count -gt 10 && break
-  done
-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
-esac
-
-      $ac_path_EGREP_found && break 3
-    done
-  done
-  done
-IFS=$as_save_IFS
-  if test -z "$ac_cv_path_EGREP"; then
-    as_fn_error "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
-  fi
-else
-  ac_cv_path_EGREP=$EGREP
-fi
-
-   fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
-$as_echo "$ac_cv_path_EGREP" >&6; }
- EGREP="$ac_cv_path_EGREP"
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
-$as_echo_n "checking for ANSI C header files... " >&6; }
-if test "${ac_cv_header_stdc+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <float.h>
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_header_stdc=yes
-else
-  ac_cv_header_stdc=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-if test $ac_cv_header_stdc = yes; then
-  # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <string.h>
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "memchr" >/dev/null 2>&1; then :
-
-else
-  ac_cv_header_stdc=no
-fi
-rm -f conftest*
-
-fi
-
-if test $ac_cv_header_stdc = yes; then
-  # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdlib.h>
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "free" >/dev/null 2>&1; then :
-
-else
-  ac_cv_header_stdc=no
-fi
-rm -f conftest*
-
-fi
-
-if test $ac_cv_header_stdc = yes; then
-  # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
-  if test "$cross_compiling" = yes; then :
-  :
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <ctype.h>
-#include <stdlib.h>
-#if ((' ' & 0x0FF) == 0x020)
-# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
-# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
-#else
-# define ISLOWER(c) \
-		   (('a' <= (c) && (c) <= 'i') \
-		     || ('j' <= (c) && (c) <= 'r') \
-		     || ('s' <= (c) && (c) <= 'z'))
-# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
-#endif
-
-#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
-int
-main ()
-{
-  int i;
-  for (i = 0; i < 256; i++)
-    if (XOR (islower (i), ISLOWER (i))
-	|| toupper (i) != TOUPPER (i))
-      return 2;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-
-else
-  ac_cv_header_stdc=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
-$as_echo "$ac_cv_header_stdc" >&6; }
-if test $ac_cv_header_stdc = yes; then
-
-$as_echo "#define STDC_HEADERS 1" >>confdefs.h
-
-fi
-
-# On IRIX 5.3, sys/types and inttypes.h are conflicting.
-for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
-		  inttypes.h stdint.h unistd.h
-do :
-  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
-ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
-"
-eval as_val=\$$as_ac_Header
-   if test "x$as_val" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-
-done
-
-
-for ac_header in stdlib.h string.h unistd.h fcntl.h
-do :
-  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
-ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
-   if test "x$as_val" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-
-done
-
-
-#check for HADOOP_PREFIX
-if test "$with_home" != ""
-then
-cat >>confdefs.h <<_ACEOF
-#define HADOOP_PREFIX "$with_home"
-_ACEOF
-
-fi
-
-# Checks for typedefs, structures, and compiler characteristics.
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5
-$as_echo_n "checking for an ANSI C-conforming const... " >&6; }
-if test "${ac_cv_c_const+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-/* FIXME: Include the comments suggested by Paul. */
-#ifndef __cplusplus
-  /* Ultrix mips cc rejects this.  */
-  typedef int charset[2];
-  const charset cs;
-  /* SunOS 4.1.1 cc rejects this.  */
-  char const *const *pcpcc;
-  char **ppc;
-  /* NEC SVR4.0.2 mips cc rejects this.  */
-  struct point {int x, y;};
-  static struct point const zero = {0,0};
-  /* AIX XL C 1.02.0.0 rejects this.
-     It does not let you subtract one const X* pointer from another in
-     an arm of an if-expression whose if-part is not a constant
-     expression */
-  const char *g = "string";
-  pcpcc = &g + (g ? g-g : 0);
-  /* HPUX 7.0 cc rejects these. */
-  ++pcpcc;
-  ppc = (char**) pcpcc;
-  pcpcc = (char const *const *) ppc;
-  { /* SCO 3.2v4 cc rejects this.  */
-    char *t;
-    char const *s = 0 ? (char *) 0 : (char const *) 0;
-
-    *t++ = 0;
-    if (s) return 0;
-  }
-  { /* Someone thinks the Sun supposedly-ANSI compiler will reject this.  */
-    int x[] = {25, 17};
-    const int *foo = &x[0];
-    ++foo;
-  }
-  { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */
-    typedef const int *iptr;
-    iptr p = 0;
-    ++p;
-  }
-  { /* AIX XL C 1.02.0.0 rejects this saying
-       "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
-    struct s { int j; const int *ap[3]; };
-    struct s *b; b->j = 5;
-  }
-  { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
-    const int foo = 10;
-    if (!foo) return 0;
-  }
-  return !cs[0] && !zero.x;
-#endif
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_c_const=yes
-else
-  ac_cv_c_const=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5
-$as_echo "$ac_cv_c_const" >&6; }
-if test $ac_cv_c_const = no; then
-
-$as_echo "#define const /**/" >>confdefs.h
-
-fi
-
-ac_fn_c_check_type "$LINENO" "pid_t" "ac_cv_type_pid_t" "$ac_includes_default"
-if test "x$ac_cv_type_pid_t" = x""yes; then :
-
-else
-
-cat >>confdefs.h <<_ACEOF
-#define pid_t int
-_ACEOF
-
-fi
-
-ac_fn_c_check_type "$LINENO" "mode_t" "ac_cv_type_mode_t" "$ac_includes_default"
-if test "x$ac_cv_type_mode_t" = x""yes; then :
-
-else
-
-cat >>confdefs.h <<_ACEOF
-#define mode_t int
-_ACEOF
-
-fi
-
-ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default"
-if test "x$ac_cv_type_size_t" = x""yes; then :
-
-else
-
-cat >>confdefs.h <<_ACEOF
-#define size_t unsigned int
-_ACEOF
-
-fi
-
-
-# Checks for library functions.
-for ac_header in stdlib.h
-do :
-  ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default"
-if test "x$ac_cv_header_stdlib_h" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_STDLIB_H 1
-_ACEOF
-
-fi
-
-done
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible malloc" >&5
-$as_echo_n "checking for GNU libc compatible malloc... " >&6; }
-if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test "$cross_compiling" = yes; then :
-  ac_cv_func_malloc_0_nonnull=no
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#if defined STDC_HEADERS || defined HAVE_STDLIB_H
-# include <stdlib.h>
-#else
-char *malloc ();
-#endif
-
-int
-main ()
-{
-return ! malloc (0);
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-  ac_cv_func_malloc_0_nonnull=yes
-else
-  ac_cv_func_malloc_0_nonnull=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_malloc_0_nonnull" >&5
-$as_echo "$ac_cv_func_malloc_0_nonnull" >&6; }
-if test $ac_cv_func_malloc_0_nonnull = yes; then :
-
-$as_echo "#define HAVE_MALLOC 1" >>confdefs.h
-
-else
-  $as_echo "#define HAVE_MALLOC 0" >>confdefs.h
-
-   case " $LIBOBJS " in
-  *" malloc.$ac_objext "* ) ;;
-  *) LIBOBJS="$LIBOBJS malloc.$ac_objext"
- ;;
-esac
-
-
-$as_echo "#define malloc rpl_malloc" >>confdefs.h
-
-fi
-
-
-for ac_header in stdlib.h
-do :
-  ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default"
-if test "x$ac_cv_header_stdlib_h" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_STDLIB_H 1
-_ACEOF
-
-fi
-
-done
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible realloc" >&5
-$as_echo_n "checking for GNU libc compatible realloc... " >&6; }
-if test "${ac_cv_func_realloc_0_nonnull+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test "$cross_compiling" = yes; then :
-  ac_cv_func_realloc_0_nonnull=no
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#if defined STDC_HEADERS || defined HAVE_STDLIB_H
-# include <stdlib.h>
-#else
-char *realloc ();
-#endif
-
-int
-main ()
-{
-return ! realloc (0, 0);
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-  ac_cv_func_realloc_0_nonnull=yes
-else
-  ac_cv_func_realloc_0_nonnull=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_realloc_0_nonnull" >&5
-$as_echo "$ac_cv_func_realloc_0_nonnull" >&6; }
-if test $ac_cv_func_realloc_0_nonnull = yes; then :
-
-$as_echo "#define HAVE_REALLOC 1" >>confdefs.h
-
-else
-  $as_echo "#define HAVE_REALLOC 0" >>confdefs.h
-
-   case " $LIBOBJS " in
-  *" realloc.$ac_objext "* ) ;;
-  *) LIBOBJS="$LIBOBJS realloc.$ac_objext"
- ;;
-esac
-
-
-$as_echo "#define realloc rpl_realloc" >>confdefs.h
-
-fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5
-$as_echo_n "checking for uid_t in sys/types.h... " >&6; }
-if test "${ac_cv_type_uid_t+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <sys/types.h>
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "uid_t" >/dev/null 2>&1; then :
-  ac_cv_type_uid_t=yes
-else
-  ac_cv_type_uid_t=no
-fi
-rm -f conftest*
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_uid_t" >&5
-$as_echo "$ac_cv_type_uid_t" >&6; }
-if test $ac_cv_type_uid_t = no; then
-
-$as_echo "#define uid_t int" >>confdefs.h
-
-
-$as_echo "#define gid_t int" >>confdefs.h
-
-fi
-
-for ac_header in unistd.h
-do :
-  ac_fn_c_check_header_mongrel "$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default"
-if test "x$ac_cv_header_unistd_h" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_UNISTD_H 1
-_ACEOF
-
-fi
-
-done
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working chown" >&5
-$as_echo_n "checking for working chown... " >&6; }
-if test "${ac_cv_func_chown_works+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test "$cross_compiling" = yes; then :
-  ac_cv_func_chown_works=no
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$ac_includes_default
-#include <fcntl.h>
-
-int
-main ()
-{
-  char *f = "conftest.chown";
-  struct stat before, after;
-
-  if (creat (f, 0600) < 0)
-    return 1;
-  if (stat (f, &before) < 0)
-    return 1;
-  if (chown (f, (uid_t) -1, (gid_t) -1) == -1)
-    return 1;
-  if (stat (f, &after) < 0)
-    return 1;
-  return ! (before.st_uid == after.st_uid && before.st_gid == after.st_gid);
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-  ac_cv_func_chown_works=yes
-else
-  ac_cv_func_chown_works=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-rm -f conftest.chown
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_chown_works" >&5
-$as_echo "$ac_cv_func_chown_works" >&6; }
-if test $ac_cv_func_chown_works = yes; then
-
-$as_echo "#define HAVE_CHOWN 1" >>confdefs.h
-
-fi
-
-for ac_func in strerror memset mkdir rmdir strdup
-do :
-  as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
-ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
-   if test "x$as_val" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-done
-
-
-ac_config_files="$ac_config_files Makefile"
-
-cat >confcache <<\_ACEOF
-# This file is a shell script that caches the results of configure
-# tests run on this system so they can be shared between configure
-# scripts and configure runs, see configure's option --config-cache.
-# It is not useful on other systems.  If it contains results you don't
-# want to keep, you may remove or edit it.
-#
-# config.status only pays attention to the cache file if you give it
-# the --recheck option to rerun configure.
-#
-# `ac_cv_env_foo' variables (set or unset) will be overridden when
-# loading this file, other *unset* `ac_cv_foo' will be assigned the
-# following values.
-
-_ACEOF
-
-# The following way of writing the cache mishandles newlines in values,
-# but we know of no workaround that is simple, portable, and efficient.
-# So, we kill variables containing newlines.
-# Ultrix sh set writes to stderr and can't be redirected directly,
-# and sets the high bit in the cache file unless we assign to the vars.
-(
-  for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
-    eval ac_val=\$$ac_var
-    case $ac_val in #(
-    *${as_nl}*)
-      case $ac_var in #(
-      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
-$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
-      esac
-      case $ac_var in #(
-      _ | IFS | as_nl) ;; #(
-      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
-      *) { eval $ac_var=; unset $ac_var;} ;;
-      esac ;;
-    esac
-  done
-
-  (set) 2>&1 |
-    case $as_nl`(ac_space=' '; set) 2>&1` in #(
-    *${as_nl}ac_space=\ *)
-      # `set' does not quote correctly, so add quotes: double-quote
-      # substitution turns \\\\ into \\, and sed turns \\ into \.
-      sed -n \
-	"s/'/'\\\\''/g;
-	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
-      ;; #(
-    *)
-      # `set' quotes correctly as required by POSIX, so do not add quotes.
-      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
-      ;;
-    esac |
-    sort
-) |
-  sed '
-     /^ac_cv_env_/b end
-     t clear
-     :clear
-     s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
-     t end
-     s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
-     :end' >>confcache
-if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
-  if test -w "$cache_file"; then
-    test "x$cache_file" != "x/dev/null" &&
-      { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
-$as_echo "$as_me: updating cache $cache_file" >&6;}
-    cat confcache >$cache_file
-  else
-    { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
-$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
-  fi
-fi
-rm -f confcache
-
-test "x$prefix" = xNONE && prefix=$ac_default_prefix
-# Let make expand exec_prefix.
-test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
-
-DEFS=-DHAVE_CONFIG_H
-
-ac_libobjs=
-ac_ltlibobjs=
-for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
-  # 1. Remove the extension, and $U if already installed.
-  ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
-  ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
-  # 2. Prepend LIBOBJDIR.  When used with automake>=1.10 LIBOBJDIR
-  #    will be set to the directory where LIBOBJS objects are built.
-  as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
-  as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
-done
-LIBOBJS=$ac_libobjs
-
-LTLIBOBJS=$ac_ltlibobjs
-
-
-
-: ${CONFIG_STATUS=./config.status}
-ac_write_fail=0
-ac_clean_files_save=$ac_clean_files
-ac_clean_files="$ac_clean_files $CONFIG_STATUS"
-{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
-$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
-as_write_fail=0
-cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
-#! $SHELL
-# Generated by $as_me.
-# Run this file to recreate the current configuration.
-# Compiler output produced by configure, useful for debugging
-# configure, is in config.log if it exists.
-
-debug=false
-ac_cs_recheck=false
-ac_cs_silent=false
-
-SHELL=\${CONFIG_SHELL-$SHELL}
-export SHELL
-_ASEOF
-cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
-## -------------------- ##
-## M4sh Initialization. ##
-## -------------------- ##
-
-# Be more Bourne compatible
-DUALCASE=1; export DUALCASE # for MKS sh
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
-  emulate sh
-  NULLCMD=:
-  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '${1+"$@"}'='"$@"'
-  setopt NO_GLOB_SUBST
-else
-  case `(set -o) 2>/dev/null` in #(
-  *posix*) :
-    set -o posix ;; #(
-  *) :
-     ;;
-esac
-fi
-
-
-as_nl='
-'
-export as_nl
-# Printing a long string crashes Solaris 7 /usr/bin/printf.
-as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
-# Prefer a ksh shell builtin over an external printf program on Solaris,
-# but without wasting forks for bash or zsh.
-if test -z "$BASH_VERSION$ZSH_VERSION" \
-    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='print -r --'
-  as_echo_n='print -rn --'
-elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='printf %s\n'
-  as_echo_n='printf %s'
-else
-  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
-    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
-    as_echo_n='/usr/ucb/echo -n'
-  else
-    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
-    as_echo_n_body='eval
-      arg=$1;
-      case $arg in #(
-      *"$as_nl"*)
-	expr "X$arg" : "X\\(.*\\)$as_nl";
-	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
-      esac;
-      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
-    '
-    export as_echo_n_body
-    as_echo_n='sh -c $as_echo_n_body as_echo'
-  fi
-  export as_echo_body
-  as_echo='sh -c $as_echo_body as_echo'
-fi
-
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
-  PATH_SEPARATOR=:
-  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
-    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
-      PATH_SEPARATOR=';'
-  }
-fi
-
-
-# IFS
-# We need space, tab and new line, in precisely that order.  Quoting is
-# there to prevent editors from complaining about space-tab.
-# (If _AS_PATH_WALK were called with IFS unset, it would disable word
-# splitting by setting IFS to empty value.)
-IFS=" ""	$as_nl"
-
-# Find who we are.  Look in the path if we contain no directory separator.
-case $0 in #((
-  *[\\/]* ) as_myself=$0 ;;
-  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-  done
-IFS=$as_save_IFS
-
-     ;;
-esac
-# We did not find ourselves, most probably we were run as `sh COMMAND'
-# in which case we are not to be found in the path.
-if test "x$as_myself" = x; then
-  as_myself=$0
-fi
-if test ! -f "$as_myself"; then
-  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
-  exit 1
-fi
-
-# Unset variables that we do not need and which cause bugs (e.g. in
-# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
-# suppresses any "Segmentation fault" message there.  '((' could
-# trigger a bug in pdksh 5.2.14.
-for as_var in BASH_ENV ENV MAIL MAILPATH
-do eval test x\${$as_var+set} = xset \
-  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
-done
-PS1='$ '
-PS2='> '
-PS4='+ '
-
-# NLS nuisances.
-LC_ALL=C
-export LC_ALL
-LANGUAGE=C
-export LANGUAGE
-
-# CDPATH.
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
-
-
-# as_fn_error ERROR [LINENO LOG_FD]
-# ---------------------------------
-# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
-# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
-# script with status $?, using 1 if that was 0.
-as_fn_error ()
-{
-  as_status=$?; test $as_status -eq 0 && as_status=1
-  if test "$3"; then
-    as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-    $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
-  fi
-  $as_echo "$as_me: error: $1" >&2
-  as_fn_exit $as_status
-} # as_fn_error
-
-
-# as_fn_set_status STATUS
-# -----------------------
-# Set $? to STATUS, without forking.
-as_fn_set_status ()
-{
-  return $1
-} # as_fn_set_status
-
-# as_fn_exit STATUS
-# -----------------
-# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
-as_fn_exit ()
-{
-  set +e
-  as_fn_set_status $1
-  exit $1
-} # as_fn_exit
-
-# as_fn_unset VAR
-# ---------------
-# Portably unset VAR.
-as_fn_unset ()
-{
-  { eval $1=; unset $1;}
-}
-as_unset=as_fn_unset
-# as_fn_append VAR VALUE
-# ----------------------
-# Append the text in VALUE to the end of the definition contained in VAR. Take
-# advantage of any shell optimizations that allow amortized linear growth over
-# repeated appends, instead of the typical quadratic growth present in naive
-# implementations.
-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
-  eval 'as_fn_append ()
-  {
-    eval $1+=\$2
-  }'
-else
-  as_fn_append ()
-  {
-    eval $1=\$$1\$2
-  }
-fi # as_fn_append
-
-# as_fn_arith ARG...
-# ------------------
-# Perform arithmetic evaluation on the ARGs, and store the result in the
-# global $as_val. Take advantage of shells that can avoid forks. The arguments
-# must be portable across $(()) and expr.
-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
-  eval 'as_fn_arith ()
-  {
-    as_val=$(( $* ))
-  }'
-else
-  as_fn_arith ()
-  {
-    as_val=`expr "$@" || test $? -eq 1`
-  }
-fi # as_fn_arith
-
-
-if expr a : '\(a\)' >/dev/null 2>&1 &&
-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
-  as_expr=expr
-else
-  as_expr=false
-fi
-
-if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
-  as_basename=basename
-else
-  as_basename=false
-fi
-
-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
-  as_dirname=dirname
-else
-  as_dirname=false
-fi
-
-as_me=`$as_basename -- "$0" ||
-$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
-	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X/"$0" |
-    sed '/^.*\/\([^/][^/]*\)\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-
-# Avoid depending upon Character Ranges.
-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
-as_cr_digits='0123456789'
-as_cr_alnum=$as_cr_Letters$as_cr_digits
-
-ECHO_C= ECHO_N= ECHO_T=
-case `echo -n x` in #(((((
--n*)
-  case `echo 'xy\c'` in
-  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
-  xy)  ECHO_C='\c';;
-  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
-       ECHO_T='	';;
-  esac;;
-*)
-  ECHO_N='-n';;
-esac
-
-rm -f conf$$ conf$$.exe conf$$.file
-if test -d conf$$.dir; then
-  rm -f conf$$.dir/conf$$.file
-else
-  rm -f conf$$.dir
-  mkdir conf$$.dir 2>/dev/null
-fi
-if (echo >conf$$.file) 2>/dev/null; then
-  if ln -s conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s='ln -s'
-    # ... but there are two gotchas:
-    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
-    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
-    # In both cases, we have to default to `cp -p'.
-    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
-      as_ln_s='cp -p'
-  elif ln conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s=ln
-  else
-    as_ln_s='cp -p'
-  fi
-else
-  as_ln_s='cp -p'
-fi
-rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
-rmdir conf$$.dir 2>/dev/null
-
-
-# as_fn_mkdir_p
-# -------------
-# Create "$as_dir" as a directory, including parents if necessary.
-as_fn_mkdir_p ()
-{
-
-  case $as_dir in #(
-  -*) as_dir=./$as_dir;;
-  esac
-  test -d "$as_dir" || eval $as_mkdir_p || {
-    as_dirs=
-    while :; do
-      case $as_dir in #(
-      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
-      *) as_qdir=$as_dir;;
-      esac
-      as_dirs="'$as_qdir' $as_dirs"
-      as_dir=`$as_dirname -- "$as_dir" ||
-$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$as_dir" : 'X\(//\)[^/]' \| \
-	 X"$as_dir" : 'X\(//\)$' \| \
-	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$as_dir" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-      test -d "$as_dir" && break
-    done
-    test -z "$as_dirs" || eval "mkdir $as_dirs"
-  } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
-
-
-} # as_fn_mkdir_p
-if mkdir -p . 2>/dev/null; then
-  as_mkdir_p='mkdir -p "$as_dir"'
-else
-  test -d ./-p && rmdir ./-p
-  as_mkdir_p=false
-fi
-
-if test -x / >/dev/null 2>&1; then
-  as_test_x='test -x'
-else
-  if ls -dL / >/dev/null 2>&1; then
-    as_ls_L_option=L
-  else
-    as_ls_L_option=
-  fi
-  as_test_x='
-    eval sh -c '\''
-      if test -d "$1"; then
-	test -d "$1/.";
-      else
-	case $1 in #(
-	-*)set "./$1";;
-	esac;
-	case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
-	???[sx]*):;;*)false;;esac;fi
-    '\'' sh
-  '
-fi
-as_executable_p=$as_test_x
-
-# Sed expression to map a string onto a valid CPP name.
-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
-
-# Sed expression to map a string onto a valid variable name.
-as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
-
-
-exec 6>&1
-## ----------------------------------- ##
-## Main body of $CONFIG_STATUS script. ##
-## ----------------------------------- ##
-_ASEOF
-test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-# Save the log message, to keep $0 and so on meaningful, and to
-# report actual input values of CONFIG_FILES etc. instead of their
-# values after options handling.
-ac_log="
-This file was extended by runAs $as_me 0.1, which was
-generated by GNU Autoconf 2.65.  Invocation command line was
-
-  CONFIG_FILES    = $CONFIG_FILES
-  CONFIG_HEADERS  = $CONFIG_HEADERS
-  CONFIG_LINKS    = $CONFIG_LINKS
-  CONFIG_COMMANDS = $CONFIG_COMMANDS
-  $ $0 $@
-
-on `(hostname || uname -n) 2>/dev/null | sed 1q`
-"
-
-_ACEOF
-
-case $ac_config_files in *"
-"*) set x $ac_config_files; shift; ac_config_files=$*;;
-esac
-
-case $ac_config_headers in *"
-"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
-esac
-
-
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-# Files that config.status was made for.
-config_files="$ac_config_files"
-config_headers="$ac_config_headers"
-
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-ac_cs_usage="\
-\`$as_me' instantiates files and other configuration actions
-from templates according to the current configuration.  Unless the files
-and actions are specified as TAGs, all are instantiated by default.
-
-Usage: $0 [OPTION]... [TAG]...
-
-  -h, --help       print this help, then exit
-  -V, --version    print version number and configuration settings, then exit
-      --config     print configuration, then exit
-  -q, --quiet, --silent
-                   do not print progress messages
-  -d, --debug      don't remove temporary files
-      --recheck    update $as_me by reconfiguring in the same conditions
-      --file=FILE[:TEMPLATE]
-                   instantiate the configuration file FILE
-      --header=FILE[:TEMPLATE]
-                   instantiate the configuration header FILE
-
-Configuration files:
-$config_files
-
-Configuration headers:
-$config_headers
-
-Report bugs to the package provider."
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
-ac_cs_version="\\
-runAs config.status 0.1
-configured by $0, generated by GNU Autoconf 2.65,
-  with options \\"\$ac_cs_config\\"
-
-Copyright (C) 2009 Free Software Foundation, Inc.
-This config.status script is free software; the Free Software Foundation
-gives unlimited permission to copy, distribute and modify it."
-
-ac_pwd='$ac_pwd'
-srcdir='$srcdir'
-test -n "\$AWK" || AWK=awk
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-# The default lists apply if the user does not specify any file.
-ac_need_defaults=:
-while test $# != 0
-do
-  case $1 in
-  --*=*)
-    ac_option=`expr "X$1" : 'X\([^=]*\)='`
-    ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
-    ac_shift=:
-    ;;
-  *)
-    ac_option=$1
-    ac_optarg=$2
-    ac_shift=shift
-    ;;
-  esac
-
-  case $ac_option in
-  # Handling of the options.
-  -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
-    ac_cs_recheck=: ;;
-  --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
-    $as_echo "$ac_cs_version"; exit ;;
-  --config | --confi | --conf | --con | --co | --c )
-    $as_echo "$ac_cs_config"; exit ;;
-  --debug | --debu | --deb | --de | --d | -d )
-    debug=: ;;
-  --file | --fil | --fi | --f )
-    $ac_shift
-    case $ac_optarg in
-    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
-    esac
-    as_fn_append CONFIG_FILES " '$ac_optarg'"
-    ac_need_defaults=false;;
-  --header | --heade | --head | --hea )
-    $ac_shift
-    case $ac_optarg in
-    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
-    esac
-    as_fn_append CONFIG_HEADERS " '$ac_optarg'"
-    ac_need_defaults=false;;
-  --he | --h)
-    # Conflict between --help and --header
-    as_fn_error "ambiguous option: \`$1'
-Try \`$0 --help' for more information.";;
-  --help | --hel | -h )
-    $as_echo "$ac_cs_usage"; exit ;;
-  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-  | -silent | --silent | --silen | --sile | --sil | --si | --s)
-    ac_cs_silent=: ;;
-
-  # This is an error.
-  -*) as_fn_error "unrecognized option: \`$1'
-Try \`$0 --help' for more information." ;;
-
-  *) as_fn_append ac_config_targets " $1"
-     ac_need_defaults=false ;;
-
-  esac
-  shift
-done
-
-ac_configure_extra_args=
-
-if $ac_cs_silent; then
-  exec 6>/dev/null
-  ac_configure_extra_args="$ac_configure_extra_args --silent"
-fi
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-if \$ac_cs_recheck; then
-  set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
-  shift
-  \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
-  CONFIG_SHELL='$SHELL'
-  export CONFIG_SHELL
-  exec "\$@"
-fi
-
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-exec 5>>config.log
-{
-  echo
-  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
-## Running $as_me. ##
-_ASBOX
-  $as_echo "$ac_log"
-} >&5
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-
-# Handling of arguments.
-for ac_config_target in $ac_config_targets
-do
-  case $ac_config_target in
-    "runAs.h") CONFIG_HEADERS="$CONFIG_HEADERS runAs.h" ;;
-    "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
-
-  *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
-  esac
-done
-
-
-# If the user did not use the arguments to specify the items to instantiate,
-# then the envvar interface is used.  Set only those that are not.
-# We use the long form for the default assignment because of an extremely
-# bizarre bug on SunOS 4.1.3.
-if $ac_need_defaults; then
-  test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
-  test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
-fi
-
-# Have a temporary directory for convenience.  Make it in the build tree
-# simply because there is no reason against having it here, and in addition,
-# creating and moving files from /tmp can sometimes cause problems.
-# Hook for its removal unless debugging.
-# Note that there is a small window in which the directory will not be cleaned:
-# after its creation but before its name has been assigned to `$tmp'.
-$debug ||
-{
-  tmp=
-  trap 'exit_status=$?
-  { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
-' 0
-  trap 'as_fn_exit 1' 1 2 13 15
-}
-# Create a (secure) tmp directory for tmp files.
-
-{
-  tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
-  test -n "$tmp" && test -d "$tmp"
-}  ||
-{
-  tmp=./conf$$-$RANDOM
-  (umask 077 && mkdir "$tmp")
-} || as_fn_error "cannot create a temporary directory in ." "$LINENO" 5
-
-# Set up the scripts for CONFIG_FILES section.
-# No need to generate them if there are no CONFIG_FILES.
-# This happens for instance with `./config.status config.h'.
-if test -n "$CONFIG_FILES"; then
-
-
-ac_cr=`echo X | tr X '\015'`
-# On cygwin, bash can eat \r inside `` if the user requested igncr.
-# But we know of no other shell where ac_cr would be empty at this
-# point, so we can use a bashism as a fallback.
-if test "x$ac_cr" = x; then
-  eval ac_cr=\$\'\\r\'
-fi
-ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
-if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
-  ac_cs_awk_cr='\r'
-else
-  ac_cs_awk_cr=$ac_cr
-fi
-
-echo 'BEGIN {' >"$tmp/subs1.awk" &&
-_ACEOF
-
-
-{
-  echo "cat >conf$$subs.awk <<_ACEOF" &&
-  echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
-  echo "_ACEOF"
-} >conf$$subs.sh ||
-  as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
-ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'`
-ac_delim='%!_!# '
-for ac_last_try in false false false false false :; do
-  . ./conf$$subs.sh ||
-    as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
-
-  ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
-  if test $ac_delim_n = $ac_delim_num; then
-    break
-  elif $ac_last_try; then
-    as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
-  else
-    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
-  fi
-done
-rm -f conf$$subs.sh
-
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-cat >>"\$tmp/subs1.awk" <<\\_ACAWK &&
-_ACEOF
-sed -n '
-h
-s/^/S["/; s/!.*/"]=/
-p
-g
-s/^[^!]*!//
-:repl
-t repl
-s/'"$ac_delim"'$//
-t delim
-:nl
-h
-s/\(.\{148\}\)..*/\1/
-t more1
-s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
-p
-n
-b repl
-:more1
-s/["\\]/\\&/g; s/^/"/; s/$/"\\/
-p
-g
-s/.\{148\}//
-t nl
-:delim
-h
-s/\(.\{148\}\)..*/\1/
-t more2
-s/["\\]/\\&/g; s/^/"/; s/$/"/
-p
-b
-:more2
-s/["\\]/\\&/g; s/^/"/; s/$/"\\/
-p
-g
-s/.\{148\}//
-t delim
-' <conf$$subs.awk | sed '
-/^[^""]/{
-  N
-  s/\n//
-}
-' >>$CONFIG_STATUS || ac_write_fail=1
-rm -f conf$$subs.awk
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-_ACAWK
-cat >>"\$tmp/subs1.awk" <<_ACAWK &&
-  for (key in S) S_is_set[key] = 1
-  FS = ""
-
-}
-{
-  line = $ 0
-  nfields = split(line, field, "@")
-  substed = 0
-  len = length(field[1])
-  for (i = 2; i < nfields; i++) {
-    key = field[i]
-    keylen = length(key)
-    if (S_is_set[key]) {
-      value = S[key]
-      line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
-      len += length(value) + length(field[++i])
-      substed = 1
-    } else
-      len += 1 + keylen
-  }
-
-  print line
-}
-
-_ACAWK
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
-  sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
-else
-  cat
-fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
-  || as_fn_error "could not setup config files machinery" "$LINENO" 5
-_ACEOF
-
-# VPATH may cause trouble with some makes, so we remove $(srcdir),
-# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
-# trailing colons and then remove the whole line if VPATH becomes empty
-# (actually we leave an empty line to preserve line numbers).
-if test "x$srcdir" = x.; then
-  ac_vpsub='/^[	 ]*VPATH[	 ]*=/{
-s/:*\$(srcdir):*/:/
-s/:*\${srcdir}:*/:/
-s/:*@srcdir@:*/:/
-s/^\([^=]*=[	 ]*\):*/\1/
-s/:*$//
-s/^[^=]*=[	 ]*$//
-}'
-fi
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-fi # test -n "$CONFIG_FILES"
-
-# Set up the scripts for CONFIG_HEADERS section.
-# No need to generate them if there are no CONFIG_HEADERS.
-# This happens for instance with `./config.status Makefile'.
-if test -n "$CONFIG_HEADERS"; then
-cat >"$tmp/defines.awk" <<\_ACAWK ||
-BEGIN {
-_ACEOF
-
-# Transform confdefs.h into an awk script `defines.awk', embedded as
-# here-document in config.status, that substitutes the proper values into
-# config.h.in to produce config.h.
-
-# Create a delimiter string that does not exist in confdefs.h, to ease
-# handling of long lines.
-ac_delim='%!_!# '
-for ac_last_try in false false :; do
-  ac_t=`sed -n "/$ac_delim/p" confdefs.h`
-  if test -z "$ac_t"; then
-    break
-  elif $ac_last_try; then
-    as_fn_error "could not make $CONFIG_HEADERS" "$LINENO" 5
-  else
-    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
-  fi
-done
-
-# For the awk script, D is an array of macro values keyed by name,
-# likewise P contains macro parameters if any.  Preserve backslash
-# newline sequences.
-
-ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
-sed -n '
-s/.\{148\}/&'"$ac_delim"'/g
-t rset
-:rset
-s/^[	 ]*#[	 ]*define[	 ][	 ]*/ /
-t def
-d
-:def
-s/\\$//
-t bsnl
-s/["\\]/\\&/g
-s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
-D["\1"]=" \3"/p
-s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2"/p
-d
-:bsnl
-s/["\\]/\\&/g
-s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
-D["\1"]=" \3\\\\\\n"\\/p
-t cont
-s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
-t cont
-d
-:cont
-n
-s/.\{148\}/&'"$ac_delim"'/g
-t clear
-:clear
-s/\\$//
-t bsnlc
-s/["\\]/\\&/g; s/^/"/; s/$/"/p
-d
-:bsnlc
-s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
-b cont
-' <confdefs.h | sed '
-s/'"$ac_delim"'/"\\\
-"/g' >>$CONFIG_STATUS || ac_write_fail=1
-
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-  for (key in D) D_is_set[key] = 1
-  FS = ""
-}
-/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
-  line = \$ 0
-  split(line, arg, " ")
-  if (arg[1] == "#") {
-    defundef = arg[2]
-    mac1 = arg[3]
-  } else {
-    defundef = substr(arg[1], 2)
-    mac1 = arg[2]
-  }
-  split(mac1, mac2, "(") #)
-  macro = mac2[1]
-  prefix = substr(line, 1, index(line, defundef) - 1)
-  if (D_is_set[macro]) {
-    # Preserve the white space surrounding the "#".
-    print prefix "define", macro P[macro] D[macro]
-    next
-  } else {
-    # Replace #undef with comments.  This is necessary, for example,
-    # in the case of _POSIX_SOURCE, which is predefined and required
-    # on some systems where configure will not decide to define it.
-    if (defundef == "undef") {
-      print "/*", prefix defundef, macro, "*/"
-      next
-    }
-  }
-}
-{ print }
-_ACAWK
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-  as_fn_error "could not setup config headers machinery" "$LINENO" 5
-fi # test -n "$CONFIG_HEADERS"
-
-
-eval set X "  :F $CONFIG_FILES  :H $CONFIG_HEADERS    "
-shift
-for ac_tag
-do
-  case $ac_tag in
-  :[FHLC]) ac_mode=$ac_tag; continue;;
-  esac
-  case $ac_mode$ac_tag in
-  :[FHL]*:*);;
-  :L* | :C*:*) as_fn_error "invalid tag \`$ac_tag'" "$LINENO" 5;;
-  :[FH]-) ac_tag=-:-;;
-  :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
-  esac
-  ac_save_IFS=$IFS
-  IFS=:
-  set x $ac_tag
-  IFS=$ac_save_IFS
-  shift
-  ac_file=$1
-  shift
-
-  case $ac_mode in
-  :L) ac_source=$1;;
-  :[FH])
-    ac_file_inputs=
-    for ac_f
-    do
-      case $ac_f in
-      -) ac_f="$tmp/stdin";;
-      *) # Look for the file first in the build tree, then in the source tree
-	 # (if the path is not absolute).  The absolute path cannot be DOS-style,
-	 # because $ac_f cannot contain `:'.
-	 test -f "$ac_f" ||
-	   case $ac_f in
-	   [\\/$]*) false;;
-	   *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
-	   esac ||
-	   as_fn_error "cannot find input file: \`$ac_f'" "$LINENO" 5;;
-      esac
-      case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
-      as_fn_append ac_file_inputs " '$ac_f'"
-    done
-
-    # Let's still pretend it is `configure' which instantiates (i.e., don't
-    # use $as_me), people would be surprised to read:
-    #    /* config.h.  Generated by config.status.  */
-    configure_input='Generated from '`
-	  $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
-	`' by configure.'
-    if test x"$ac_file" != x-; then
-      configure_input="$ac_file.  $configure_input"
-      { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
-$as_echo "$as_me: creating $ac_file" >&6;}
-    fi
-    # Neutralize special characters interpreted by sed in replacement strings.
-    case $configure_input in #(
-    *\&* | *\|* | *\\* )
-       ac_sed_conf_input=`$as_echo "$configure_input" |
-       sed 's/[\\\\&|]/\\\\&/g'`;; #(
-    *) ac_sed_conf_input=$configure_input;;
-    esac
-
-    case $ac_tag in
-    *:-:* | *:-) cat >"$tmp/stdin" \
-      || as_fn_error "could not create $ac_file" "$LINENO" 5 ;;
-    esac
-    ;;
-  esac
-
-  ac_dir=`$as_dirname -- "$ac_file" ||
-$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$ac_file" : 'X\(//\)[^/]' \| \
-	 X"$ac_file" : 'X\(//\)$' \| \
-	 X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$ac_file" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-  as_dir="$ac_dir"; as_fn_mkdir_p
-  ac_builddir=.
-
-case "$ac_dir" in
-.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
-*)
-  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
-  # A ".." for each directory in $ac_dir_suffix.
-  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
-  case $ac_top_builddir_sub in
-  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
-  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
-  esac ;;
-esac
-ac_abs_top_builddir=$ac_pwd
-ac_abs_builddir=$ac_pwd$ac_dir_suffix
-# for backward compatibility:
-ac_top_builddir=$ac_top_build_prefix
-
-case $srcdir in
-  .)  # We are building in place.
-    ac_srcdir=.
-    ac_top_srcdir=$ac_top_builddir_sub
-    ac_abs_top_srcdir=$ac_pwd ;;
-  [\\/]* | ?:[\\/]* )  # Absolute name.
-    ac_srcdir=$srcdir$ac_dir_suffix;
-    ac_top_srcdir=$srcdir
-    ac_abs_top_srcdir=$srcdir ;;
-  *) # Relative name.
-    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
-    ac_top_srcdir=$ac_top_build_prefix$srcdir
-    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
-esac
-ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
-
-
-  case $ac_mode in
-  :F)
-  #
-  # CONFIG_FILE
-  #
-
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-# If the template does not know about datarootdir, expand it.
-# FIXME: This hack should be removed a few years after 2.60.
-ac_datarootdir_hack=; ac_datarootdir_seen=
-ac_sed_dataroot='
-/datarootdir/ {
-  p
-  q
-}
-/@datadir@/p
-/@docdir@/p
-/@infodir@/p
-/@localedir@/p
-/@mandir@/p'
-case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
-*datarootdir*) ac_datarootdir_seen=yes;;
-*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
-$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-  ac_datarootdir_hack='
-  s&@datadir@&$datadir&g
-  s&@docdir@&$docdir&g
-  s&@infodir@&$infodir&g
-  s&@localedir@&$localedir&g
-  s&@mandir@&$mandir&g
-  s&\\\${datarootdir}&$datarootdir&g' ;;
-esac
-_ACEOF
-
-# Neutralize VPATH when `$srcdir' = `.'.
-# Shell code in configure.ac might set extrasub.
-# FIXME: do we really want to maintain this feature?
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-ac_sed_extra="$ac_vpsub
-$extrasub
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-:t
-/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
-s|@configure_input@|$ac_sed_conf_input|;t t
-s&@top_builddir@&$ac_top_builddir_sub&;t t
-s&@top_build_prefix@&$ac_top_build_prefix&;t t
-s&@srcdir@&$ac_srcdir&;t t
-s&@abs_srcdir@&$ac_abs_srcdir&;t t
-s&@top_srcdir@&$ac_top_srcdir&;t t
-s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
-s&@builddir@&$ac_builddir&;t t
-s&@abs_builddir@&$ac_abs_builddir&;t t
-s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
-$ac_datarootdir_hack
-"
-eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
-  || as_fn_error "could not create $ac_file" "$LINENO" 5
-
-test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
-  { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
-  { ac_out=`sed -n '/^[	 ]*datarootdir[	 ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined.  Please make sure it is defined." >&5
-$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined.  Please make sure it is defined." >&2;}
-
-  rm -f "$tmp/stdin"
-  case $ac_file in
-  -) cat "$tmp/out" && rm -f "$tmp/out";;
-  *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
-  esac \
-  || as_fn_error "could not create $ac_file" "$LINENO" 5
- ;;
-  :H)
-  #
-  # CONFIG_HEADER
-  #
-  if test x"$ac_file" != x-; then
-    {
-      $as_echo "/* $configure_input  */" \
-      && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs"
-    } >"$tmp/config.h" \
-      || as_fn_error "could not create $ac_file" "$LINENO" 5
-    if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then
-      { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
-$as_echo "$as_me: $ac_file is unchanged" >&6;}
-    else
-      rm -f "$ac_file"
-      mv "$tmp/config.h" "$ac_file" \
-	|| as_fn_error "could not create $ac_file" "$LINENO" 5
-    fi
-  else
-    $as_echo "/* $configure_input  */" \
-      && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \
-      || as_fn_error "could not create -" "$LINENO" 5
-  fi
- ;;
-
-
-  esac
-
-done # for ac_tag
-
-
-as_fn_exit 0
-_ACEOF
-ac_clean_files=$ac_clean_files_save
-
-test $ac_write_fail = 0 ||
-  as_fn_error "write failure creating $CONFIG_STATUS" "$LINENO" 5
-
-
-# configure is writing to config.log, and then calls config.status.
-# config.status does its own redirection, appending to config.log.
-# Unfortunately, on DOS this fails, as config.log is still kept open
-# by configure, so config.status won't be able to write to it; its
-# output is simply discarded.  So we exec the FD to /dev/null,
-# effectively closing config.log, so it can be properly (re)opened and
-# appended to by config.status.  When coming back to configure, we
-# need to make the FD available again.
-if test "$no_create" != yes; then
-  ac_cs_success=:
-  ac_config_status_args=
-  test "$silent" = yes &&
-    ac_config_status_args="$ac_config_status_args --quiet"
-  exec 5>/dev/null
-  $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
-  exec 5>>config.log
-  # Use ||, not &&, to avoid exiting from the if with $? = 1, which
-  # would make configure fail if this is the last instruction.
-  $ac_cs_success || as_fn_exit $?
-fi
-if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
-$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
-fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5
-$as_echo_n "checking for stdbool.h that conforms to C99... " >&6; }
-if test "${ac_cv_header_stdbool_h+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-#include <stdbool.h>
-#ifndef bool
- "error: bool is not defined"
-#endif
-#ifndef false
- "error: false is not defined"
-#endif
-#if false
- "error: false is not 0"
-#endif
-#ifndef true
- "error: true is not defined"
-#endif
-#if true != 1
- "error: true is not 1"
-#endif
-#ifndef __bool_true_false_are_defined
- "error: __bool_true_false_are_defined is not defined"
-#endif
-
-	struct s { _Bool s: 1; _Bool t; } s;
-
-	char a[true == 1 ? 1 : -1];
-	char b[false == 0 ? 1 : -1];
-	char c[__bool_true_false_are_defined == 1 ? 1 : -1];
-	char d[(bool) 0.5 == true ? 1 : -1];
-	bool e = &s;
-	char f[(_Bool) 0.0 == false ? 1 : -1];
-	char g[true];
-	char h[sizeof (_Bool)];
-	char i[sizeof s.t];
-	enum { j = false, k = true, l = false * true, m = true * 256 };
-	/* The following fails for
-	   HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */
-	_Bool n[m];
-	char o[sizeof n == m * sizeof n[0] ? 1 : -1];
-	char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1];
-#	if defined __xlc__ || defined __GNUC__
-	 /* Catch a bug in IBM AIX xlc compiler version 6.0.0.0
-	    reported by James Lemley on 2005-10-05; see
-	    http://lists.gnu.org/archive/html/bug-coreutils/2005-10/msg00086.html
-	    This test is not quite right, since xlc is allowed to
-	    reject this program, as the initializer for xlcbug is
-	    not one of the forms that C requires support for.
-	    However, doing the test right would require a runtime
-	    test, and that would make cross-compilation harder.
-	    Let us hope that IBM fixes the xlc bug, and also adds
-	    support for this kind of constant expression.  In the
-	    meantime, this test will reject xlc, which is OK, since
-	    our stdbool.h substitute should suffice.  We also test
-	    this with GCC, where it should work, to detect more
-	    quickly whether someone messes up the test in the
-	    future.  */
-	 char digs[] = "0123456789";
-	 int xlcbug = 1 / (&(digs + 5)[-2 + (bool) 1] == &digs[4] ? 1 : -1);
-#	endif
-	/* Catch a bug in an HP-UX C compiler.  See
-	   http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html
-	   http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html
-	 */
-	_Bool q = true;
-	_Bool *pq = &q;
-
-int
-main ()
-{
-
-	*pq |= q;
-	*pq |= ! q;
-	/* Refer to every declared value, to avoid compiler optimizations.  */
-	return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l
-		+ !m + !n + !o + !p + !q + !pq);
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_header_stdbool_h=yes
-else
-  ac_cv_header_stdbool_h=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5
-$as_echo "$ac_cv_header_stdbool_h" >&6; }
-ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default"
-if test "x$ac_cv_type__Bool" = x""yes; then :
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE__BOOL 1
-_ACEOF
-
-
-fi
-
-if test $ac_cv_header_stdbool_h = yes; then
-
-$as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h
-
-fi
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
-$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
-set x ${MAKE-make}
-ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
-if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat >conftest.make <<\_ACEOF
-SHELL = /bin/sh
-all:
-	@echo '@@@%%%=$(MAKE)=@@@%%%'
-_ACEOF
-# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
-case `${MAKE-make} -f conftest.make 2>/dev/null` in
-  *@@@%%%=?*=@@@%%%*)
-    eval ac_cv_prog_make_${ac_make}_set=yes;;
-  *)
-    eval ac_cv_prog_make_${ac_make}_set=no;;
-esac
-rm -f conftest.make
-fi
-if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-  SET_MAKE=
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-  SET_MAKE="MAKE=${MAKE-make}"
-fi
-
diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure.ac b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure.ac
deleted file mode 100644
index ffaa458..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure.ac
+++ /dev/null
@@ -1,65 +0,0 @@
-#                                               -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-AC_PREREQ(2.59)
-AC_INIT([runAs],[0.1])
-
-#changing default prefix value to empty string, so that the binary does not
-#get installed within the system
-AC_PREFIX_DEFAULT(.)
-
-#add new arguments --with-home
-AC_ARG_WITH(home,[--with-home path to hadoop home dir])
-AC_CONFIG_SRCDIR([main.c])
-AC_CONFIG_HEADER([runAs.h])
-
-# Checks for programs.
-AC_PROG_CC
-
-# Checks for libraries.
-
-# Checks for header files.
-AC_HEADER_STDC
-AC_CHECK_HEADERS([stdlib.h string.h unistd.h fcntl.h])
-
-#check for HADOOP_PREFIX
-if test "$with_home" != ""
-then
-AC_DEFINE_UNQUOTED(HADOOP_PREFIX,"$with_home")
-fi
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_C_CONST
-AC_TYPE_PID_T
-AC_TYPE_MODE_T
-AC_TYPE_SIZE_T
-
-# Checks for library functions.
-AC_FUNC_MALLOC
-AC_FUNC_REALLOC
-AC_FUNC_CHOWN
-AC_CHECK_FUNCS([strerror memset mkdir rmdir strdup])
-
-AC_CONFIG_FILES([Makefile])
-AC_OUTPUT
-
-AC_HEADER_STDBOOL
-AC_PROG_MAKE_SET
diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/main.c b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/main.c
deleted file mode 100644
index e31635f..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/main.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runAs.h"
-
-/**
- * The binary accepts a command of the following format:
- * cluster-controller user hostname hadoop-daemon.sh-command
- */
-int main(int argc, char **argv) {
-  int errorcode;
-  char *user;
-  char *hostname;
-  char *command;
-  struct passwd user_detail;
-  int i = 1;
-  /*
-   * Minimum number of arguments required for the binary to perform.
-   */
-  if (argc < 4) {
-    fprintf(stderr, "Invalid number of arguments passed to the binary\n");
-    return INVALID_ARGUMENT_NUMER;
-  }
-
-  user = argv[1];
-  if (user == NULL) {
-    fprintf(stderr, "Invalid user name\n");
-    return INVALID_USER_NAME;
-  }
-
-  if (getuserdetail(user, &user_detail) != 0) {
-    fprintf(stderr, "Invalid user name\n");
-    return INVALID_USER_NAME;
-  }
-
-  if (user_detail.pw_gid == 0 || user_detail.pw_uid == 0) {
-      fprintf(stderr, "Cannot run tasks as super user\n");
-      return SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS;
-  }
-
-  hostname = argv[2];
-  command = argv[3];
-  return process_controller_command(user, hostname, command);
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.c b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.c
deleted file mode 100644
index 9b7803c..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runAs.h"
-
-/*
- * Function to get the user details populated given a user name. 
- */
-int getuserdetail(char *user, struct passwd *user_detail) {
-  struct passwd *tempPwdPtr;
-  int size = sysconf(_SC_GETPW_R_SIZE_MAX);
-  char pwdbuffer[size];
-  if ((getpwnam_r(user, user_detail, pwdbuffer, size, &tempPwdPtr)) != 0) {
-    fprintf(stderr, "Invalid user provided to getpwnam\n");
-    return -1;
-  }
-  return 0;
-}
-
-/**
- * Function to switch the user identity and set the appropriate 
- * group control as the user specified in the argument.
- */
-int switchuser(char *user) {
-  //populate the user details
-  struct passwd user_detail;
-  if ((getuserdetail(user, &user_detail)) != 0) {
-    return INVALID_USER_NAME;
-  }
-  //set the right supplementary groups for the user.
-  if (initgroups(user_detail.pw_name, user_detail.pw_gid) != 0) {
-    fprintf(stderr, "Init groups call for the user : %s failed\n",
-        user_detail.pw_name);
-    return INITGROUPS_FAILED;
-  }
-  errno = 0;
-  //switch the group.
-  setgid(user_detail.pw_gid);
-  if (errno != 0) {
-    fprintf(stderr, "Setgid for the user : %s failed\n", user_detail.pw_name);
-    return SETUID_OPER_FAILED;
-  }
-  errno = 0;
-  //switch the user
-  setuid(user_detail.pw_uid);
-  if (errno != 0) {
-    fprintf(stderr, "Setuid for the user : %s failed\n", user_detail.pw_name);
-    return SETUID_OPER_FAILED;
-  }
-  errno = 0;
-  //set the effective user id.
-  seteuid(user_detail.pw_uid);
-  if (errno != 0) {
-    fprintf(stderr, "Seteuid for the user : %s failed\n", user_detail.pw_name);
-    return SETUID_OPER_FAILED;
-  }
-  return 0;
-}
-
-/*
- * Top level method which processes a cluster management
- * command.
- */
-int process_cluster_command(char * user,  char * node , char *command) {
-  char *finalcommandstr;
-  int len;
-  int errorcode = 0;
-  if (strncmp(command, "", strlen(command)) == 0) {
-    fprintf(stderr, "Invalid command passed\n");
-    return INVALID_COMMAND_PASSED;
-  }
-  len = STRLEN + strlen(command);
-  finalcommandstr = (char *) malloc((len + 1) * sizeof(char));
-  snprintf(finalcommandstr, len + 1, SCRIPT_DIR_PATTERN, HADOOP_PREFIX,
-      command);
-  finalcommandstr[len] = '\0';
-  errorcode = switchuser(user);
-  if (errorcode != 0) {
-    fprintf(stderr, "switch user failed\n");
-    return errorcode;
-  }
-  errno = 0;
-  execlp(SSH_COMMAND, SSH_COMMAND, node, finalcommandstr, NULL);
-  if (errno != 0) {
-    fprintf(stderr, "execlp failed due to: %s\n", strerror(errno));
-  }
-  return 0;
-}
-
-/*
- * Process cluster controller command the API exposed to the 
- * main in order to execute the cluster commands.
- */
-int process_controller_command(char *user, char * node, char *command) {
-  return process_cluster_command(user, node, command);
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.h.in b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.h.in
deleted file mode 100644
index 4cdab71..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.h.in
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <pwd.h>
-#include <assert.h>
-#include <getopt.h>
-#include <grp.h>
-
-/*
-* List of possible error codes.
-*/
-enum errorcodes {
-  INVALID_ARGUMENT_NUMER = 1,
-  INVALID_USER_NAME, //2
-  SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS, //3
-  INITGROUPS_FAILED, //4
-  SETUID_OPER_FAILED, //5
-  INVALID_COMMAND_PASSED, //6
-};
-
-#undef HADOOP_PREFIX
-
-#define SSH_COMMAND "ssh"
-
-#define SCRIPT_DIR_PATTERN "%s/bin/hadoop-daemon.sh %s" //%s to be substituted
-
-#define STRLEN strlen(SCRIPT_DIR_PATTERN) + strlen(HADOOP_PREFIX)
-
-/*
- * Function to get the user details populated given a user name. 
- */
-int getuserdetail(char *user, struct passwd *user_detail);
-
- /*
- * Process cluster controller command the API exposed to the 
- * main in order to execute the cluster commands.
- */
-int process_controller_command(char *user, char *node, char *command);
diff --git a/hadoop-common-project/hadoop-common/src/test/system/conf/hadoop-policy-system-test.xml b/hadoop-common-project/hadoop-common/src/test/system/conf/hadoop-policy-system-test.xml
deleted file mode 100644
index b2c3735..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/conf/hadoop-policy-system-test.xml
+++ /dev/null
@@ -1,68 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-<!--
-  These are Herriot-specific protocols. This section shouldn't be present in
-  a production cluster configuration. This file needs to be linked to the
-  main conf/hadoop-policy.xml in the deployment process.
--->
-  <property>
-    <name>security.daemon.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DaemonProtocol, extended by all other
-    Herriot RPC protocols.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, for example "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.nn.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NNProtocol, used by the
-    Herriot AbstractDaemonCluster's implementations to connect to a remote
-    NameNode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, for example "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.dn.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DNProtocol, used by the
-    Herriot AbstractDaemonCluster's implementations to connect to a remote
-    DataNode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, for example "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.tt.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TTProtocol, used by the
-    Herriot AbstractDaemonCluster's implementations to connect to a remote
-    TaskTracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, for example "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java
deleted file mode 100644
index 7254aa7..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java
+++ /dev/null
@@ -1,599 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.IOException;
-import java.util.*;
-
-import org.junit.Assert;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-import javax.management.*;
-import javax.management.remote.JMXConnector;
-import javax.management.remote.JMXConnectorFactory;
-import javax.management.remote.JMXServiceURL;
-
-/**
- * Abstract class which encapsulates the DaemonClient which is used in the 
- * system tests.<br/>
- * 
- * @param PROXY the proxy implementation of a specific Daemon 
- */
-public abstract class AbstractDaemonClient<PROXY extends DaemonProtocol> {
-  private Configuration conf;
-  private Boolean jmxEnabled = null;
-  private MBeanServerConnection connection;
-  private int jmxPortNumber = -1;
-  private RemoteProcess process;
-  private boolean connected;
-
-  private static final Log LOG = LogFactory.getLog(AbstractDaemonClient.class);
-  private static final String HADOOP_JMX_DOMAIN = "Hadoop";
-  private static final String HADOOP_OPTS_ENV = "HADOOP_OPTS";
-
-  /**
-   * Create a Daemon client.<br/>
-   * 
-   * @param conf configuration to be used by the proxy to connect to the Daemon.
-   * @param process the Daemon process to manage the particular daemon.
-   * 
-   * @throws IOException on RPC error
-   */
-  public AbstractDaemonClient(Configuration conf, RemoteProcess process) 
-      throws IOException {
-    this.conf = conf;
-    this.process = process;
-  }
-
-  /**
-   * Gets if the client is connected to the Daemon <br/>
-   * 
-   * @return true if connected.
-   */
-  public boolean isConnected() {
-    return connected;
-  }
-
-  protected void setConnected(boolean connected) {
-    this.connected = connected;
-  }
-
-  /**
-   * Create an RPC proxy to the daemon <br/>
-   * 
-   * @throws IOException on RPC error
-   */
-  public abstract void connect() throws IOException;
-
-  /**
-   * Disconnect the underlying RPC proxy to the daemon.<br/>
-   * @throws IOException in case of communication errors
-   */
-  public abstract void disconnect() throws IOException;
-
-  /**
-   * Get the proxy to connect to a particular service Daemon.<br/>
-   * 
-   * @return proxy to connect to a particular service Daemon.
-   */
-  protected abstract PROXY getProxy();
-
-  /**
-   * Gets the daemon level configuration.<br/>
-   * 
-   * @return configuration using which daemon is running
-   */
-  public Configuration getConf() {
-    return conf;
-  }
-
-  /**
-   * Gets the host on which Daemon is currently running. <br/>
-   * 
-   * @return hostname
-   */
-  public String getHostName() {
-    return process.getHostName();
-  }
-
-  /**
-   * Gets if the Daemon is ready to accept RPC connections. <br/>
-   * 
-   * @return true if daemon is ready.
-   * @throws IOException on RPC error
-   */
-  public boolean isReady() throws IOException {
-    return getProxy().isReady();
-  }
-
-  /**
-   * Kills the Daemon process <br/>
-   * @throws IOException on RPC error
-   */
-  public void kill() throws IOException {
-    process.kill();
-  }
-
-  /**
-   * Checks if the Daemon process is alive or not <br/>
-   * @throws IOException on RPC error
-   */
-  public void ping() throws IOException {
-    getProxy().ping();
-  }
-
-  /**
-   * Start up the Daemon process. <br/>
-   * @throws IOException on RPC error
-   */
-  public void start() throws IOException {
-    process.start();
-  }
-
-  /**
-   * Get system level view of the Daemon process.
-   * 
-   * @return returns system level view of the Daemon process.
-   * 
-   * @throws IOException on RPC error. 
-   */
-  public ProcessInfo getProcessInfo() throws IOException {
-    return getProxy().getProcessInfo();
-  }
-
-  /**
-   * Abstract method to retrieve the name of a daemon-specific env. var
-   * @return name of the Hadoop environment variable containing the daemon options
-   */
-  abstract public String getHadoopOptsEnvName ();
-
-  /**
-   * Checks remote daemon process info to see if certain JMX sys. properties
-   * are available and determines whether the JMX service is enabled on the remote side
-   *
-   * @return <code>boolean</code> code indicating availability of remote JMX
-   * @throws IOException is thrown in case of communication errors
-   */
-  public boolean isJmxEnabled() throws IOException {
-    return isJmxEnabled(HADOOP_OPTS_ENV) ||
-        isJmxEnabled(getHadoopOptsEnvName());
-  }
-
-  /**
-   * Checks remote daemon process info to see if certain JMX sys. properties
-   * are available and determines whether the JMX service is enabled on the remote side
-   *
-   * @param envivar name of an environment variable to be searched
-   * @return <code>boolean</code> code indicating availability of remote JMX
-   * @throws IOException is thrown in case of communication errors
-   */
-  protected boolean isJmxEnabled(String envivar) throws IOException {
-    if (jmxEnabled != null) return jmxEnabled;
-    boolean ret = false;
-    String jmxRemoteString = "-Dcom.sun.management.jmxremote";
-    String hadoopOpts = getProcessInfo().getEnv().get(envivar);
-    LOG.debug("Looking into " + hadoopOpts + " from " + envivar);
-    List<String> options = Arrays.asList(hadoopOpts.split(" "));
-    ret = options.contains(jmxRemoteString);
-    jmxEnabled = ret;
-    return ret;
-  }
-
-  /**
-   * Checks remote daemon process info to find the remote JMX server port number.
-   * By default this method will look into the "HADOOP_OPTS" variable only.
-   * @return port number of the remote JMX server or -1 if it can't be found
-   * @throws IOException is thrown in case of communication errors
-   * @throws IllegalArgumentException if non-integer port is set
-   *  in the remote process info
-   */
-  public int getJmxPortNumber() throws IOException, IllegalArgumentException {
-    int portNo = getJmxPortNumber(HADOOP_OPTS_ENV);
-    return portNo != -1 ? portNo : getJmxPortNumber(getHadoopOptsEnvName());
-  }
-
-  /**
-   * Checks remote daemon process info to find remote JMX server port number
-   *
-   * @param envivar name of the env. var. to look for JMX specific settings
-   * @return port number of the remote JMX server or -1 if it can't be found
-   * @throws IOException is thrown in case of communication errors
-   * @throws IllegalArgumentException if non-integer port is set
-   *  in the remote process info
-   */
-  protected int getJmxPortNumber(final String envivar) throws
-      IOException, IllegalArgumentException {
-    if (jmxPortNumber != -1) return jmxPortNumber;
-    String jmxPortString = "-Dcom.sun.management.jmxremote.port";
-
-    String hadoopOpts = getProcessInfo().getEnv().get(envivar);
-    int portNumber = -1;
-    boolean found = false;
-    String[] options = hadoopOpts.split(" ");
-     for (String option : options) {
-       if (option.startsWith(jmxPortString)) {
-         found = true;
-         try {
-           portNumber = Integer.parseInt(option.split("=")[1]);
-         } catch (NumberFormatException e) {
-           throw new IllegalArgumentException("JMX port number isn't integer");
-         }
-         break;
-       }
-     }
-     if (!found)
-       throw new IllegalArgumentException("Can't detect JMX port number");
-    jmxPortNumber = portNumber;
-    return jmxPortNumber;
-  }
-
-  /**
-   * Return a file status object that represents the path.
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return a FileStatus object
-   * @throws IOException see specific implementation
-   */
-  public FileStatus getFileStatus(String path, boolean local) throws IOException {
-    return getProxy().getFileStatus(path, local);
-  }
-
-  /**
-   * Create a file with full permissions in a file system.
-   * @param path - source path where the file has to be created.
-   * @param fileName - file name
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFile(String path, String fileName, 
-      boolean local) throws IOException {
-    getProxy().createFile(path, fileName, null, local);
-  }
-
-  /**
-   * Create a file with given permissions in a file system.
-   * @param path - source path where the file has to be created.
-   * @param fileName - file name.
-   * @param permission - file permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFile(String path, String fileName, 
-     FsPermission permission,  boolean local) throws IOException {
-    getProxy().createFile(path, fileName, permission, local);
-  }
-
-  /**
-   * Create a folder with default permissions in a file system.
-   * @param path - source path where the folder has to be created.
-   * @param folderName - folder name.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs. 
-   */
-  public void createFolder(String path, String folderName, 
-     boolean local) throws IOException {
-    getProxy().createFolder(path, folderName, null, local);
-  }
-
-  /**
-   * Create a folder with given permissions in a file system.
-   * @param path - source path where the folder has to be created.
-   * @param folderName - folder name.
-   * @param permission - folder permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFolder(String path, String folderName, 
-     FsPermission permission,  boolean local) throws IOException {
-    getProxy().createFolder(path, folderName, permission, local);
-  }
-
-  /**
-   * List the statuses of the files/directories in the given path if the path is
-   * a directory.
-   * 
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return the statuses of the files/directories in the given path
-   * @throws IOException on RPC error. 
-   */
-  public FileStatus[] listStatus(String path, boolean local) 
-    throws IOException {
-    return getProxy().listStatus(path, local);
-  }
-
-  /**
-   * List the statuses of the files/directories in the given path if the path is
-   * a directory, recursively or non-recursively depending on parameters
-   * 
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @param recursive 
-   *          whether to recursively get the status
-   * @return the statuses of the files/directories in the given path
-   * @throws IOException is thrown on RPC error. 
-   */
-  public FileStatus[] listStatus(String path, boolean local, boolean recursive)
-    throws IOException {
-    List<FileStatus> status = new ArrayList<FileStatus>();
-    addStatus(status, path, local, recursive);
-    return status.toArray(new FileStatus[0]);
-  }
-
-  private void addStatus(List<FileStatus> status, String f, 
-      boolean local, boolean recursive) 
-    throws IOException {
-    FileStatus[] fs = listStatus(f, local);
-    if (fs != null) {
-      for (FileStatus fileStatus : fs) {
-        if (!f.equals(fileStatus.getPath().toString())) {
-          status.add(fileStatus);
-          if (recursive) {
-            addStatus(status, fileStatus.getPath().toString(), local, recursive);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Gets number of times FATAL log messages were logged in Daemon logs.
-   * <br/>
-   * Pattern used for searching is FATAL. <br/>
-   * @param excludeExpList list of exception to exclude 
-   * @return number of occurrence of fatal message.
-   * @throws IOException in case of communication errors
-   */
-  public int getNumberOfFatalStatementsInLog(String [] excludeExpList)
-      throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = "FATAL";
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Gets number of times ERROR log messages were logged in Daemon logs.
-   * <br/>
-   * Pattern used for searching is ERROR. <br/>
-   * @param excludeExpList list of exception to exclude 
-   * @return number of occurrence of error message.
-   * @throws IOException is thrown on RPC error. 
-   */
-  public int getNumberOfErrorStatementsInLog(String[] excludeExpList) 
-      throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = "ERROR";    
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Gets number of times Warning log messages were logged in Daemon logs.
-   * <br/>
-   * Pattern used for searching is WARN. <br/>
-   * @param excludeExpList list of exception to exclude 
-   * @return number of occurrence of warning message.
-   * @throws IOException thrown on RPC error. 
-   */
-  public int getNumberOfWarnStatementsInLog(String[] excludeExpList) 
-      throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = "WARN";
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Gets number of times a given Exception was present in the log file. <br/>
-   * 
-   * @param e exception class.
-   * @param excludeExpList list of exceptions to exclude. 
-   * @return number of exceptions in log
-   * @throws IOException is thrown on RPC error. 
-   */
-  public int getNumberOfExceptionsInLog(Exception e,
-      String[] excludeExpList) throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = e.getClass().getSimpleName();    
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Number of times ConcurrentModificationException was present in the log file.
-   * <br/>
-   * @param excludeExpList list of exceptions to exclude.
-   * @return number of times exception in log file.
-   * @throws IOException is thrown on RPC error. 
-   */
-  public int getNumberOfConcurrentModificationExceptionsInLog(
-      String[] excludeExpList) throws IOException {
-    return getNumberOfExceptionsInLog(new ConcurrentModificationException(),
-        excludeExpList);
-  }
-
-  private int errorCount;
-  private int fatalCount;
-  private int concurrentExceptionCount;
-
-  /**
-   * Populate the initial exception counts, used to assert that no new exceptions
-   * were logged by the daemon while the testcase was running.
-   * @param excludeExpList list of exceptions to exclude
-   * @throws IOException is thrown on RPC error. 
-   */
-  protected void populateExceptionCount(String [] excludeExpList) 
-      throws IOException {
-    errorCount = getNumberOfErrorStatementsInLog(excludeExpList);
-    LOG.info("Number of error messages in logs : " + errorCount);
-    fatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
-    LOG.info("Number of fatal statement in logs : " + fatalCount);
-    concurrentExceptionCount =
-        getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
-    LOG.info("Number of concurrent modification in logs : "
-        + concurrentExceptionCount);
-  }
-
-  /**
-   * Assert if the new exceptions were logged into the log file.
-   * <br/>
-   * <b><i>
-   * Pre-req for the method is that populateExceptionCount() has 
-   * to be called before calling this method.</b></i>
-   * @param excludeExpList list of exceptions to exclude
-   * @throws IOException is thrown on RPC error. 
-   */
-  protected void assertNoExceptionsOccurred(String [] excludeExpList) 
-      throws IOException {
-    int newerrorCount = getNumberOfErrorStatementsInLog(excludeExpList);
-    LOG.info("Number of error messages while asserting :" + newerrorCount);
-    int newfatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
-    LOG.info("Number of fatal messages while asserting : " + newfatalCount);
-    int newconcurrentExceptionCount =
-        getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
-    LOG.info("Number of concurrentmodification exception while asserting :"
-        + newconcurrentExceptionCount);
-    Assert.assertEquals(
-        "New Error Messages logged in the log file", errorCount, newerrorCount);
-    Assert.assertEquals(
-        "New Fatal messages logged in the log file", fatalCount, newfatalCount);
-    Assert.assertEquals(
-        "New ConcurrentModificationException in log file",
-        concurrentExceptionCount, newconcurrentExceptionCount);
-  }
-
-  /**
-   * Builds the JMX ObjectName from the given domain, service name, and type
-   * @param domain JMX domain name
-   * @param serviceName name of the service where MBean is registered (NameNode)
-   * @param typeName name of the MXBean class
-   * @return ObjectName for requested MXBean or <code>null</code> if one wasn't
-   *    found
-   * @throws java.io.IOException if the object name is malformed
-   */
-  protected ObjectName getJmxBeanName(String domain, String serviceName,
-                                      String typeName) throws IOException {
-    if (domain == null)
-      domain = HADOOP_JMX_DOMAIN;
-
-    ObjectName jmxBean;
-    try {
-      jmxBean = new ObjectName(domain + ":service=" + serviceName +
-        ",name=" + typeName);
-    } catch (MalformedObjectNameException e) {
-      LOG.debug(e.getStackTrace());
-      throw new IOException(e);
-    }
-    return jmxBean;
-  }
-
-  /**
-   * Create connection with the remote JMX server at given host and port
-   * @param host name of the remote JMX server host
-   * @param port port number of the remote JMX server host
-   * @return instance of MBeanServerConnection or <code>null</code> if one
-   *    hasn't been established
-   * @throws IOException in case of communication errors
-   */
-  protected MBeanServerConnection establishJmxConnection(String host, int port)
-    throws IOException {
-    if (connection != null) return connection;
-    String urlPattern = null;
-    try {
-      urlPattern = "service:jmx:rmi:///jndi/rmi://" +
-        host + ":" + port +
-        "/jmxrmi";
-      JMXServiceURL url = new JMXServiceURL(urlPattern);
-      JMXConnector connector = JMXConnectorFactory.connect(url, null);
-      connection = connector.getMBeanServerConnection();
-    } catch (java.net.MalformedURLException badURLExc) {
-      LOG.debug("bad url: " + urlPattern, badURLExc);
-      throw new IOException(badURLExc);
-    }
-    return connection;
-  }
-
-  Hashtable<String, ObjectName> jmxObjectNames =
-    new Hashtable<String, ObjectName>();
-
-  /**
-   * Method implements all logic for receiving a bean's attribute.
-   * If any initializations such as establishing bean server connections, etc.
-   * are needed, it will perform them.
-   * @param serviceName name of the service where MBean is registered (NameNode)
-   * @param type name of the MXBean class
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the attribute or <code>null</code> if not found
-   * @throws IOException is thrown in case of any errors
-   */
-  protected Object getJmxAttribute (String serviceName,
-                                    String type,
-                                    String attributeName)
-    throws IOException {
-    Object retAttribute = null;
-    String domain = null;
-    if (isJmxEnabled()) {
-      try {
-        MBeanServerConnection conn =
-          establishJmxConnection(getHostName(),
-              getJmxPortNumber(HADOOP_OPTS_ENV));
-        for (String d : conn.getDomains()) {
-          if (d != null && d.startsWith(HADOOP_JMX_DOMAIN))
-            domain = d;
-        }
-        if (!jmxObjectNames.containsKey(type))
-          jmxObjectNames.put(type, getJmxBeanName(domain, serviceName, type));
-        retAttribute =
-          conn.getAttribute(jmxObjectNames.get(type), attributeName);
-      } catch (MBeanException e) {
-        LOG.debug(e.getStackTrace());
-        throw new IOException(e);
-      } catch (AttributeNotFoundException e) {
-        LOG.warn(e.getStackTrace());
-        throw new IOException(e);
-      } catch (InstanceNotFoundException e) {
-        LOG.warn(e.getStackTrace());
-        throw new IOException(e);
-      } catch (ReflectionException e) {
-        LOG.debug(e.getStackTrace());
-        throw new IOException(e);
-      }
-    }
-    return retAttribute;
-  }
-
-  /**
-   * This method has to be implemented by appropriate concrete daemon client
-   * e.g. DNClient, NNClient, etc.
-   * Concrete implementation has to provide names of the service and bean type
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the given attribute
-   * @throws IOException is thrown in case of communication errors
-   */
-  public abstract Object getDaemonAttribute (String attributeName)
-    throws IOException;
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java
deleted file mode 100644
index b1277a0..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java
+++ /dev/null
@@ -1,537 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.IOException;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileInputStream;
-import java.io.DataInputStream;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Iterator;
-import java.util.Enumeration;
-import java.util.Arrays;
-import java.util.Hashtable;
-import java.net.URI;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.test.system.process.ClusterProcessManager;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-/**
- * Abstract class which represent the cluster having multiple daemons.
- */
-@SuppressWarnings("unchecked")
-public abstract class AbstractDaemonCluster {
-
-  private static final Log LOG = LogFactory.getLog(AbstractDaemonCluster.class);
-  private String [] excludeExpList ;
-  private Configuration conf;
-  protected ClusterProcessManager clusterManager;
-  private Map<Enum<?>, List<AbstractDaemonClient>> daemons = 
-    new LinkedHashMap<Enum<?>, List<AbstractDaemonClient>>();
-  private String newConfDir = null;  
-  private static final  String CONF_HADOOP_LOCAL_DIR =
-      "test.system.hdrc.hadoop.local.confdir"; 
-  private final static Object waitLock = new Object();
-  
-  /**
-   * Constructor to create a cluster client.<br/>
-   * 
-   * @param conf
-   *          Configuration to be used while constructing the cluster.
-   * @param rcluster
-   *          process manager instance to be used for managing the daemons.
-   * 
-   * @throws IOException
-   */
-  public AbstractDaemonCluster(Configuration conf,
-      ClusterProcessManager rcluster) throws IOException {
-    this.conf = conf;
-    this.clusterManager = rcluster;
-    createAllClients();
-  }
-
-  /**
-   * The method returns the cluster manager. The system test cases require an
-   * instance of HadoopDaemonRemoteCluster to invoke certain operations on the
-   * daemon.
-   * 
-   * @return instance of clusterManager
-   */
-  public ClusterProcessManager getClusterManager() {
-    return clusterManager;
-  }
-
-  protected void createAllClients() throws IOException {
-    for (RemoteProcess p : clusterManager.getAllProcesses()) {
-      List<AbstractDaemonClient> dms = daemons.get(p.getRole());
-      if (dms == null) {
-        dms = new ArrayList<AbstractDaemonClient>();
-        daemons.put(p.getRole(), dms);
-      }
-      dms.add(createClient(p));
-    }
-  }
-  
-  /**
-   * Method to create the daemon client.<br/>
-   * 
-   * @param process
-   *          to manage the daemon.
-   * @return instance of the daemon client
-   * 
-   * @throws IOException
-   */
-  protected abstract AbstractDaemonClient<DaemonProtocol> 
-    createClient(RemoteProcess process) throws IOException;
-
-  /**
-   * Get the global cluster configuration which was used to create the 
-   * cluster. <br/>
-   * 
-   * @return global configuration of the cluster.
-   */
-  public Configuration getConf() {
-    return conf;
-  }
-
-  /**
-   * Return the client handle of all the Daemons.<br/>
-   * 
-   * @return map of role to daemon clients' list.
-   */
-  public Map<Enum<?>, List<AbstractDaemonClient>> getDaemons() {
-    return daemons;
-  }
-
-  /**
-   * Checks if the cluster is ready for testing. <br/>
-   * Algorithm for checking is as follows : <br/>
-   * <ul>
-   * <li> Wait for Daemon to come up </li>
-   * <li> Check if daemon is ready </li>
-   * <li> If one of the daemons is not ready, return false </li>
-   * </ul> 
-   * 
-   * @return true if whole cluster is ready.
-   * 
-   * @throws IOException
-   */
-  public boolean isReady() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        waitForDaemon(daemon);
-        if (!daemon.isReady()) {
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-
-  protected void waitForDaemon(AbstractDaemonClient d) {
-    final int TEN_SEC = 10000;
-    while(true) {
-      try {
-        LOG.info("Waiting for daemon at " + d.getHostName() + " to come up.");
-        LOG.info("Daemon might not be " +
-            "ready or the call to setReady() method hasn't been " +
-            "injected to " + d.getClass() + " ");
-        d.connect();
-        break;
-      } catch (IOException e) {
-        try {
-          Thread.sleep(TEN_SEC);
-        } catch (InterruptedException ie) {
-        }
-      }
-    }
-  }
-
-  /**
-   * Starts the cluster daemons.
-   * @throws IOException
-   */
-  public void start() throws IOException {
-    clusterManager.start();
-  }
-
-  /**
-   * Stops the cluster daemons.
-   * @throws IOException
-   */
-  public void stop() throws IOException {
-    clusterManager.stop();
-  }
-
-  /**
-   * Connect to daemon RPC ports.
-   * @throws IOException
-   */
-  public void connect() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        daemon.connect();
-      }
-    }
-  }
-
-  /**
-   * Disconnect from daemon RPC ports.
-   * @throws IOException
-   */
-  public void disconnect() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        daemon.disconnect();
-      }
-    }
-  }
-
-  /**
-   * Ping all the daemons of the cluster.
-   * @throws IOException
-   */
-  public void ping() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        LOG.info("Daemon is : " + daemon.getHostName() + " pinging....");
-        daemon.ping();
-      }
-    }
-  }
-
-  /**
-   * Connect to the cluster and ensure that it is clean to run tests.
-   * @throws Exception
-   */
-  public void setUp() throws Exception {
-    while (!isReady()) {
-      Thread.sleep(1000);
-    }
-    connect();
-    ping();
-    clearAllControlActions();
-    ensureClean();
-    populateExceptionCounts();
-  }
-  
-  /**
-   * This is mainly used for the test cases to set the list of exceptions
-   * that will be excluded.
-   * @param excludeExpList list of exceptions to exclude
-   */
-  public void setExcludeExpList(String [] excludeExpList) {
-    this.excludeExpList = excludeExpList;
-  }
-  
-  public void clearAllControlActions() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        LOG.info("Daemon is : " + daemon.getHostName() + " clearing actions....");
-        daemon.getProxy().clearActions();
-      }
-    }
-  }
-
-  /**
-   * Ensure that the cluster is clean to run tests.
-   * @throws IOException
-   */
-  public void ensureClean() throws IOException {
-  }
-
-  /**
-   * Ensure that cluster is clean. Disconnect from the RPC ports of the daemons.
-   * @throws IOException
-   */
-  public void tearDown() throws IOException {
-    ensureClean();
-    clearAllControlActions();
-    assertNoExceptionMessages();
-    disconnect();
-  }
-
-  /**
-   * Populate the exception counts in all the daemons so that it can be checked when 
-   * the testcase has finished running.<br/>
-   * @throws IOException
-   */
-  protected void populateExceptionCounts() throws IOException {
-    for(List<AbstractDaemonClient> lst : daemons.values()) {
-      for(AbstractDaemonClient d : lst) {
-        d.populateExceptionCount(excludeExpList);
-      }
-    }
-  }
-
-  /**
-   * Assert no exception has been thrown during the sequence of the actions.
-   * <br/>
-   * @throws IOException
-   */
-  protected void assertNoExceptionMessages() throws IOException {
-    for(List<AbstractDaemonClient> lst : daemons.values()) {
-      for(AbstractDaemonClient d : lst) {
-        d.assertNoExceptionsOccurred(excludeExpList);
-      }
-    }
-  }
-
-  /**
-   * Get the proxy user definitions from the cluster configuration.
-   * @return ProxyUserDefinitions - proxy users data like groups and hosts.
-   * @throws Exception - if no proxy users found in config.
-   */
-  public ProxyUserDefinitions getHadoopProxyUsers() throws
-     Exception {
-    Iterator itr = conf.iterator();
-    ArrayList<String> proxyUsers = new ArrayList<String>();
-    while (itr.hasNext()) {
-      String entry = itr.next().toString();
-      if (entry.indexOf("hadoop.proxyuser") >= 0 &&
-          entry.indexOf("groups=") >= 0) {
-         proxyUsers.add(entry.split("\\.")[2]);
-      }
-    }
-    if (proxyUsers.size() == 0) {
-       LOG.error("No proxy users found in the configuration.");
-       throw new Exception("No proxy users found in the configuration.");
-    }
-
-    ProxyUserDefinitions pud = new ProxyUserDefinitions() {
-      @Override
-      public boolean writeToFile(URI filePath) throws IOException {
-        throw new UnsupportedOperationException("No such method exists.");
-      };
-    };
-
-    for (String userName : proxyUsers) {
-       List<String> groups = Arrays.asList(conf.get("hadoop.proxyuser." +
-           userName + ".groups").split("//,"));
-       List<String> hosts = Arrays.asList(conf.get("hadoop.proxyuser." +
-           userName + ".hosts").split("//,"));
-       ProxyUserDefinitions.GroupsAndHost definitions =
-           pud.new GroupsAndHost();
-       definitions.setGroups(groups);
-       definitions.setHosts(hosts);
-       pud.addProxyUser(userName, definitions);
-    }
-    return pud;
-  }
-  
-  /**
-   * Get the local folder where the configuration file is stored temporarily
-   * while the object is serialized.
-   * @return String temporary local folder path for configuration.
-   */
-  private String getHadoopLocalConfDir() {
-    String hadoopLocalConfDir = conf.get(CONF_HADOOP_LOCAL_DIR);
-    if (hadoopLocalConfDir == null || hadoopLocalConfDir.isEmpty()) {
-      LOG.error("No configuration "
-          + "for the CONF_HADOOP_LOCAL_DIR passed");
-      throw new IllegalArgumentException(
-          "No Configuration passed for hadoop conf local directory");
-    }
-    return hadoopLocalConfDir;
-  }
-
-  /**
-   * Restarts the cluster with a new configuration at runtime.<br/>
-   * @param props attributes for new configuration.
-   * @param configFile configuration file.
-   * @throws IOException if an I/O error occurs.
-   */
-  public void restartClusterWithNewConfig(Hashtable<String,?> props, 
-      String configFile) throws IOException {
-
-    String mapredConf = null;
-    String localDirPath = null;
-    File localFolderObj = null;
-    File xmlFileObj = null;
-    String confXMLFile = null;
-    Configuration initConf = new Configuration(getConf());
-    Enumeration<String> e = props.keys();
-    while (e.hasMoreElements()) {
-      String propKey = e.nextElement();
-      Object propValue = props.get(propKey);
-      initConf.set(propKey,propValue.toString());
-    }
-
-    localDirPath = getHadoopLocalConfDir();
-    localFolderObj = new File(localDirPath);
-    if (!localFolderObj.exists()) {
-      localFolderObj.mkdir();
-    }
-    confXMLFile = localDirPath + File.separator + configFile;
-    xmlFileObj = new File(confXMLFile);
-    initConf.writeXml(new FileOutputStream(xmlFileObj));
-    newConfDir = clusterManager.pushConfig(localDirPath);
-    stop();
-    waitForClusterToStop();
-    clusterManager.start(newConfDir);
-    waitForClusterToStart();
-    localFolderObj.delete();
-  }
-  
-  /**
-   * Restarts the cluster with the default configuration.<br/>
-   * @throws IOException if an I/O error occurs.
-   */
-  public void restart() throws 
-      IOException {
-    stop();
-    waitForClusterToStop();
-    start();
-    waitForClusterToStart();
-    cleanupNewConf(newConfDir);
-  }
-
-  /**
-   * Deletes the new configuration folder.
-   * @param path - configuration directory path.
-   * @throws IOException if an I/O error occurs.
-   */
-  public void cleanupNewConf(String path) throws IOException {
-    File file = new File(path);
-    file.delete();
-  }
-  
-  /**
-   * Waits until the cluster is stopped.<br/>
-   * @throws IOException if an I/O error occurs.
-   */
-  public void waitForClusterToStop() throws 
-      IOException {
-    List<Thread> chkDaemonStop = new ArrayList<Thread>();
-    for (List<AbstractDaemonClient> set : daemons.values()) {	  
-      for (AbstractDaemonClient daemon : set) {
-        DaemonStopThread dmStop = new DaemonStopThread(daemon);
-        chkDaemonStop.add(dmStop);
-        dmStop.start();
-      }
-    }
-
-    for (Thread daemonThread : chkDaemonStop){
-      try {
-        daemonThread.join();
-      } catch(InterruptedException intExp) {
-         LOG.warn("Interrupted while thread is joining." + intExp.getMessage());
-      }
-    }
-  }
- 
-  /**
-   * Waits until the cluster is started.<br/>
-   * @throws IOException if an I/O error occurs.
-   */
-  public void  waitForClusterToStart() throws 
-      IOException {
-    List<Thread> chkDaemonStart = new ArrayList<Thread>();
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        DaemonStartThread dmStart = new DaemonStartThread(daemon);
-        chkDaemonStart.add(dmStart);
-        dmStart.start();
-      }
-    }
-
-    for (Thread daemonThread : chkDaemonStart){
-      try {
-        daemonThread.join();
-      } catch(InterruptedException intExp) {
-        LOG.warn("Interrupted while thread is joining" + intExp.getMessage());
-      }
-    }
-  }
-
-  /**
-   * Waits for the specified amount of time. An InterruptedException raised
-   * while waiting is caught and logged as a warning.
-   * @param duration time in milliseconds.
-   */
-  public void waitFor(long duration) {
-    try {
-      synchronized (waitLock) {
-        waitLock.wait(duration);
-      }
-    } catch (InterruptedException intExp) {
-       LOG.warn("Interrrupeted while thread is waiting" + intExp.getMessage());
-    }
-  }
-  
-  class DaemonStartThread extends Thread {
-    private AbstractDaemonClient daemon;
-
-    public DaemonStartThread(AbstractDaemonClient daemon) {
-      this.daemon = daemon;
-    }
-
-    public void run(){
-      LOG.info("Waiting for Daemon " + daemon.getHostName() 
-          + " to come up.....");
-      while (true) { 
-        try {
-          daemon.ping();
-          LOG.info("Daemon is : " + daemon.getHostName() + " pinging...");
-          break;
-        } catch (Exception exp) {
-          if(LOG.isDebugEnabled()) {
-            LOG.debug(daemon.getHostName() + " is waiting to come up.");
-          }
-          waitFor(60000);
-        }
-      }
-    }
-  }
-  
-  class DaemonStopThread extends Thread {
-    private AbstractDaemonClient daemon;
-
-    public DaemonStopThread(AbstractDaemonClient daemon) {
-      this.daemon = daemon;
-    }
-
-    public void run() {
-      LOG.info("Waiting for Daemon " + daemon.getHostName() 
-          + " to stop.....");
-      while (true) {
-        try {
-          daemon.ping();
-          if(LOG.isDebugEnabled()) {
-            LOG.debug(daemon.getHostName() +" is waiting state to stop.");
-          }
-          waitFor(60000);
-        } catch (Exception exp) {
-          LOG.info("Daemon is : " + daemon.getHostName() + " stopped...");
-          break;
-        } 
-      }
-    }
-  }
-}
-
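For illustration, a minimal sketch of how a system test might drive the restart helpers removed above; the "cluster" instance, the property name, and the file name are placeholders and assume the usual java.util imports, none of which come from the removed sources.

    // Sketch only: restart the system-test cluster with one overridden property,
    // then roll back to the default configuration. "cluster" is assumed to be an
    // instance of a concrete subclass of the removed abstract cluster class.
    Hashtable<String, String> props = new Hashtable<String, String>();
    props.put("io.sort.factor", "25");                    // placeholder property
    cluster.restartClusterWithNewConfig(props, "system-test.xml");
    // ... exercise the daemons under the new configuration ...
    cluster.restart();                                    // back to the defaults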
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java
deleted file mode 100644
index de1b799..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-
-/**
- * Class to represent a control action which can be performed on Daemon.<br/>
- * 
- */
-
-public abstract class ControlAction<T extends Writable> implements Writable {
-
-  private T target;
-
-  /**
-   * Default constructor of the control action. <br/>
-   */
-  public ControlAction() {
-  }
-
-  /**
-   * Constructor which sets the target of the control action. <br/>
-   * 
-   * @param target
-   *          of the control action.
-   */
-  public ControlAction(T target) {
-    this.target = target;
-  }
-
-  /**
-   * Gets the target of the control action. <br/>
-   * 
-   * @return target of action
-   */
-  public T getTarget() {
-    return target;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    target.readFields(in);
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    target.write(out);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof ControlAction) {
-      ControlAction<T> other = (ControlAction<T>) obj;
-      return (this.target.equals(other.getTarget()));
-    } else {
-      return false;
-    }
-  }
-  
-  
-  @Override
-  public String toString() {
-    return "Action Target : " + this.target;
-  }
-}
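A hedged sketch of a concrete subclass of the removed ControlAction, using org.apache.hadoop.io.Text as the Writable target purely for illustration; the class name is hypothetical. The no-argument constructor supplies an empty target so that readFields() has an instance to populate.

    // Sketch only: a control action whose target is the host name of the
    // daemon the action applies to (Text is just an example Writable).
    class RestartDaemonAction extends ControlAction<Text> {
      public RestartDaemonAction() {
        super(new Text());             // empty target for deserialization
      }
      public RestartDaemonAction(Text daemonHost) {
        super(daemonHost);
      }
    }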
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java
deleted file mode 100644
index 6cdccc3..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.VersionedProtocol;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-/**
- * RPC interface of a given Daemon.
- */
-public interface DaemonProtocol extends VersionedProtocol{
-  long versionID = 1L;
-
-  /**
-   * Returns the Daemon configuration.
-   * @return Configuration
-   * @throws IOException in case of errors
-   */
-  Configuration getDaemonConf() throws IOException;
-
-  /**
-   * Check if the Daemon is alive.
-   * 
-   * @throws IOException
-   *           if Daemon is unreachable.
-   */
-  void ping() throws IOException;
-
-  /**
-   * Check if the Daemon is ready to accept RPC connections.
-   * 
-   * @return true if Daemon is ready to accept RPC connection.
-   * @throws IOException in case of errors
-   */
-  boolean isReady() throws IOException;
-
-  /**
-   * Get system level view of the Daemon process.
-   * 
-   * @return returns system level view of the Daemon process.
-   * 
-   * @throws IOException in case of errors
-   */
-  ProcessInfo getProcessInfo() throws IOException;
-  
-  /**
-   * Return a file status object that represents the path.
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return a FileStatus object
-   * @throws FileNotFoundException when the path does not exist;
-   *         IOException see specific implementation
-   */
-  FileStatus getFileStatus(String path, boolean local) throws IOException;
-
-  /**
-   * Create a file with given permissions in a file system.
-   * @param path - source path where the file has to be created.
-   * @param fileName - file name.
-   * @param permission - file permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  void createFile(String path, String fileName, 
-      FsPermission permission, boolean local) throws IOException;
-   
-  /**
-   * Create a folder with given permissions in a file system.
-   * @param path - source path where the folder has to be created.
-   * @param folderName - folder name.
-   * @param permission - folder permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFolder(String path, String folderName, 
-      FsPermission permission, boolean local) throws IOException;
-  /**
-   * List the statuses of the files/directories in the given path if the path is
-   * a directory.
-   * 
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return the statuses of the files/directories in the given path
-   * @throws IOException in case of errors
-   */
-  FileStatus[] listStatus(String path, boolean local) throws IOException;
-  
-  /**
-   * Enables a particular control action to be performed on the Daemon <br/>
-   * 
-   * @param action is a control action  to be enabled.
-   * 
-   * @throws IOException in case of errors
-   */
-  @SuppressWarnings("unchecked")
-  void sendAction(ControlAction action) throws IOException;
-  
-  /**
-   * Checks if the particular control action has been delivered to the Daemon 
-   * component <br/>
-   * 
-   * @param action to be checked.
-   * 
-   * @return true if action is still in waiting queue of 
-   *          actions to be delivered.
-   * @throws IOException in case of errors
-   */
-  @SuppressWarnings("unchecked")
-  boolean isActionPending(ControlAction action) throws IOException;
-  
-  /**
-   * Removes a particular control action from the list of the actions which the
-   * daemon maintains. <br/>
-   * <i><b>Not to be directly called by Test Case or clients.</b></i>
-   * @param action to be removed
-   * @throws IOException in case of errors
-   */
-  
-  @SuppressWarnings("unchecked")
-  void removeAction(ControlAction action) throws IOException;
-  
-  /**
-   * Clears out the list of control actions on the particular daemon.
-   * <br/>
-   * @throws IOException in case of errors
-   */
-  void clearActions() throws IOException;
-  
-  /**
-   * Gets a list of pending actions which are targeted on the specified key. 
-   * <br/>
-   * <i><b>Not to be directly used by clients</b></i>
-   * @param key target
-   * @return list of actions.
-   * @throws IOException in case of errors
-   */
-  @SuppressWarnings("unchecked")
-  ControlAction[] getActions(Writable key) throws IOException;
-
-  /**
-   * Gets the number of times a particular pattern has been found in the 
-   * daemons log file.<br/>
-   * <b><i>Please note that search spans across all previous messages of
-   * Daemon, so better practice is to get previous counts before an operation
-   * and then re-check if the sequence of action has caused any problems</i></b>
-   * @param pattern to look for in the daemon's log file
-   * @param list list of exceptions to ignore
-   * @return number of times the pattern is found in the log file.
-   * @throws IOException in case of errors
-   */
-  int getNumberOfMatchesInLogFile(String pattern, String[] list) 
-      throws IOException;
-
-  /**
-   * Gets the user who started the particular daemon initially. <br/>
-   * 
-   * @return user who started the particular daemon.
-   * @throws IOException in case of errors
-   */
-  String getDaemonUser() throws IOException;
-
-  /**
-   * Suspends the process.
-   * @param pid process id.
-   * @return true if the process is suspended otherwise false.
-   * @throws IOException if an I/O error occurs.
-   */
-  boolean suspendProcess(String pid) throws IOException;
-
-  /**
-   * Resumes a suspended process.
-   * @param pid process id
-   * @return true if suspended process is resumed otherwise false.
-   * @throws IOException if an I/O error occurs.
-   */
-  boolean resumeProcess(String pid) throws IOException;
-}
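A hedged sketch of how a test client might poll a daemon through the RPC interface removed above; obtaining the DaemonProtocol proxy is left to the framework's RPC plumbing, and waitUntilReady is a hypothetical helper, not part of the interface.

    // Sketch only: block until the daemon reports readiness, then ping it once.
    static void waitUntilReady(DaemonProtocol daemon, long timeoutMillis)
        throws IOException, InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMillis;
      while (!daemon.isReady()) {
        if (System.currentTimeMillis() > deadline) {
          throw new IOException("Daemon did not become ready in time");
        }
        Thread.sleep(1000);
      }
      daemon.ping();                   // throws IOException if unreachable
    }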
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java
deleted file mode 100644
index 22b3855..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.util.Map;
-
-import org.apache.hadoop.io.Writable;
-
-/**
- * Daemon system level process information.
- */
-public interface ProcessInfo extends Writable {
-  /**
-   * Get the current time in milliseconds.<br/>
-   * 
-   * @return current time on the daemon clock in milliseconds.
-   */
-  public long currentTimeMillis();
-
-  /**
-   * Get the environment that was used to start the Daemon process.<br/>
-   * 
-   * @return the environment variable list.
-   */
-  public Map<String,String> getEnv();
-
-  /**
-   * Get the System properties of the Daemon process.<br/>
-   * 
-   * @return the properties list.
-   */
-  public Map<String,String> getSystemProperties();
-
-  /**
-   * Get the number of active threads in Daemon VM.<br/>
-   * 
-   * @return number of active threads in Daemon VM.
-   */
-  public int activeThreadCount();
-
-  /**
-   * Get the maximum heap size that is configured for the Daemon VM. <br/>
-   * 
-   * @return maximum heap size.
-   */
-  public long maxMemory();
-
-  /**
-   * Get the free memory in Daemon VM.<br/>
-   * 
-   * @return free memory.
-   */
-  public long freeMemory();
-
-  /**
-   * Get the total used memory in the Daemon VM. <br/>
-   * 
-   * @return total used memory.
-   */
-  public long totalMemory();
-}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java
deleted file mode 100644
index c32666d..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-
-public class ProcessInfoImpl implements ProcessInfo {
-
-  private int threadCount;
-  private long currentTime;
-  private long freemem;
-  private long maxmem;
-  private long totmem;
-  private Map<String, String> env;
-  private Map<String, String> props;
-
-  public ProcessInfoImpl() {
-    env = new HashMap<String, String>();
-    props = new HashMap<String, String>();
-  }
-
-  /**
-   * Construct a concrete process information object. <br/>
-   * 
-   * @param threadCount
-   *          count of threads.
-   * @param currentTime current time in milliseconds.
-   * @param freemem free memory in the daemon VM.
-   * @param maxmem maximum heap size of the daemon VM.
-   * @param totmem total memory used in the daemon VM.
-   * @param env environment variable list.
-   * @param props system properties list.
-   */
-  public ProcessInfoImpl(int threadCount, long currentTime, long freemem,
-      long maxmem, long totmem, Map<String, String> env, 
-      Map<String, String> props) {
-    this.threadCount = threadCount;
-    this.currentTime = currentTime;
-    this.freemem = freemem;
-    this.maxmem = maxmem;
-    this.totmem = totmem;
-    this.env = env;
-    this.props = props;
-  }
-
-  @Override
-  public int activeThreadCount() {
-    return threadCount;
-  }
-
-  @Override
-  public long currentTimeMillis() {
-    return currentTime;
-  }
-
-  @Override
-  public long freeMemory() {
-    return freemem;
-  }
-
-  @Override
-  public Map<String, String> getEnv() {
-    return env;
-  }
-
-  @Override
-  public Map<String,String> getSystemProperties() {
-    return props;
-  }
-
-  @Override
-  public long maxMemory() {
-    return maxmem;
-  }
-
-  @Override
-  public long totalMemory() {
-    return totmem;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    this.threadCount = in.readInt();
-    this.currentTime = in.readLong();
-    this.freemem = in.readLong();
-    this.maxmem = in.readLong();
-    this.totmem = in.readLong();
-    read(in, env);
-    read(in, props);
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(threadCount);
-    out.writeLong(currentTime);
-    out.writeLong(freemem);
-    out.writeLong(maxmem);
-    out.writeLong(totmem);
-    write(out, env);
-    write(out, props);
-  }
-
-  private void read(DataInput in, Map<String, String> map) throws IOException {
-    int size = in.readInt();
-    for (int i = 0; i < size; i = i + 2) {
-      String key = in.readUTF();
-      String value = in.readUTF();
-      map.put(key, value);
-    }
-  }
-
-  private void write(DataOutput out, Map<String, String> map) 
-  throws IOException {
-    int size = (map.size() * 2);
-    out.writeInt(size);
-    for (Map.Entry<String, String> entry : map.entrySet()) {
-      out.writeUTF(entry.getKey());
-      out.writeUTF(entry.getValue());
-    }
-  }
-
-  @Override
-  public String toString() {
-    StringBuffer strBuf = new StringBuffer();
-    strBuf.append(String.format("active threads : %d\n", threadCount));
-    strBuf.append(String.format("current time  : %d\n", currentTime));
-    strBuf.append(String.format("free memory  : %d\n", freemem));
-    strBuf.append(String.format("total memory  : %d\n", totmem));
-    strBuf.append(String.format("max memory  : %d\n", maxmem));
-    strBuf.append("Environment Variables : \n");
-    for (Map.Entry<String, String> entry : env.entrySet()) {
-      strBuf.append(String.format("key : %s value : %s \n", entry.getKey(),
-          entry.getValue()));
-    }
-    return strBuf.toString();
-  }
-
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProxyUserDefinitions.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProxyUserDefinitions.java
deleted file mode 100644
index c9d6be4..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProxyUserDefinitions.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.system;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.io.IOException;
-import java.net.URI;
-
-/**
- *  The data container which holds the host names and
- *  groups for each proxy user.
- */
-public abstract class ProxyUserDefinitions {
-
-  /**
-   *  Groups and host names container
-   */
-  public class GroupsAndHost {
-    private List<String> groups;
-    private List<String> hosts;
-    public List<String> getGroups() {
-      return groups;
-    }
-    public void setGroups(List<String> groups) {
-      this.groups = groups;
-    }
-    public List<String> getHosts() {
-      return hosts;
-    }
-    public void setHosts(List<String> hosts) {
-      this.hosts = hosts;
-    }
-  }
-
-  protected Map<String, GroupsAndHost> proxyUsers;
-  protected ProxyUserDefinitions () {
-    proxyUsers = new HashMap<String, GroupsAndHost>();
-  }
-
-  /**
-   * Add proxy user data to a container.
-   * @param userName - proxy user name.
-   * @param definitions - groups and host names.
-   */
-  public void addProxyUser (String userName, GroupsAndHost definitions) {
-    proxyUsers.put(userName, definitions);
-  }
-
-  /**
-   * Get the host names and groups for the given proxy user.
-   * @param userName - proxy user name.
-   * @return - GroupsAndHost object.
-   */
-  public GroupsAndHost getProxyUser (String userName) {
-    return proxyUsers.get(userName);
-  }
-
-  /**
-   * Get the Proxy users data which contains the host names
-   * and groups against each user.
-   * @return - the proxy users data as hash map.
-   */
-  public Map<String, GroupsAndHost> getProxyUsers () {
-    return proxyUsers;
-  }
-
-  /**
-   * The implementation of this method has to be provided by a child of this class.
-   * @param filePath - URI of the file the proxy user data is written to.
-   * @return true if the data was written successfully.
-   * @throws IOException if an I/O error occurs.
-   */
-  public abstract boolean writeToFile(URI filePath) throws IOException;
-}
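For context, a sketch of the configuration shape this container is populated from by getHadoopProxyUsers() in the cluster class above; the user name, groups, and hosts are placeholders, and the values are plain comma-separated lists.

    // Sketch only: the proxy-user keys read from the cluster configuration.
    Configuration conf = new Configuration();
    conf.set("hadoop.proxyuser.testproxy.groups", "users,staff");
    conf.set("hadoop.proxyuser.testproxy.hosts",
        "host1.example.com,host2.example.com");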
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java
deleted file mode 100644
index 70dd414..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system.process;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Interface to manage the remote processes in the cluster.
- */
-public interface ClusterProcessManager {
-
-  /**
-   * Initialization method to pass the configuration object which is required 
-   * by the ClusterProcessManager to manage the cluster.<br/>
-   * Configuration object should typically contain all the parameters which are 
-   * required by the implementations.<br/>
-   *  
-   * @param conf configuration containing values of the specific keys which 
-   * are required by the implementation of the cluster process manager.
-   * 
-   * @throws IOException when initialization fails.
-   */
-  void init(Configuration conf) throws IOException;
-
-  /**
-   * Get the list of RemoteProcess handles of all the remote processes.
-   */
-  List<RemoteProcess> getAllProcesses();
-
-  /**
-   * Get all the roles this cluster's daemon processes have.
-   */
-  Set<Enum<?>> getRoles();
-
-  /**
-   * Method to start all the remote daemons.<br/>
-   * 
-   * @throws IOException if startup procedure fails.
-   */
-  void start() throws IOException;
-
-  /**
-   * Starts the daemon from the user specified conf dir.
-   * @param newConfLocation the dir where the new conf files reside.
-   * @throws IOException if start from new conf fails. 
-   */
-  void start(String newConfLocation) throws IOException;
-
-  /**
-   * Stops the daemon running from user specified conf dir.
-   * 
-   * @param newConfLocation the dir where the new conf files reside.
-   * @throws IOException if stop from new conf fails. 
-   */
-  void stop(String newConfLocation) throws IOException;
-
-  /**
-   * Method to shutdown all the remote daemons.<br/>
-   * 
-   * @throws IOException if shutdown procedure fails.
-   */
-  void stop() throws IOException;
-  
-  /**
-   * Gets if multi-user support is enabled for this cluster. 
-   * <br/>
-   * @return true if multi-user support is enabled.
-   * @throws IOException if RPC returns error. 
-   */
-  boolean isMultiUserSupported() throws IOException;
-
-  /**
-   * Pushes a new config to the daemons.
-   * @param localDir local directory containing the new config files.
-   * @return the remote directory location to which the config is pushed.
-   * @throws IOException if pushConfig fails.
-   */
-  String pushConfig(String localDir) throws IOException;
-}
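A minimal sketch of the lifecycle a test driver runs through this interface; "manager" stands for any concrete implementation and "conf" for an already prepared Configuration, both assumed rather than taken from the removed sources.

    // Sketch only: start the cluster, push a new config, and restart with it.
    manager.init(conf);
    manager.start();                                    // start all daemons
    String remoteConfDir = manager.pushConfig("/tmp/new-conf");
    manager.stop();                                     // stop under the old conf
    manager.start(remoteConfDir);                       // restart with the new conf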
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java
deleted file mode 100644
index d3e5d63..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java
+++ /dev/null
@@ -1,404 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system.process;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-
-/**
- * The abstract base class which implements the start-up and shut-down routines
- * based on hadoop-daemon.sh. <br/>
- * 
- * Class requires two keys to be present in the Configuration objects passed to
- * it. Look at <code>CONF_HADOOPHOME</code> and
- * <code>CONF_HADOOPCONFDIR</code> for the names of the
- * configuration keys.
- * 
- * The final command execution will have the following format: 
- * <br/>
- * <code>
- *  ssh host 'hadoop-home/bin/hadoop-daemon.sh --script scriptName 
- *  --config HADOOP_CONF_DIR (start|stop) command'
- * </code>
- */
-public abstract class HadoopDaemonRemoteCluster 
-    implements ClusterProcessManager {
-
-  private static final Log LOG = LogFactory
-      .getLog(HadoopDaemonRemoteCluster.class.getName());
-
-  public static final String CONF_HADOOPNEWCONFDIR =
-    "test.system.hdrc.hadoopnewconfdir";
-  /**
-   * Key used to configure the HADOOP_PREFIX to be used by the
-   * HadoopDaemonRemoteCluster.
-   */
-  public final static String CONF_HADOOPHOME =
-    "test.system.hdrc.hadoophome";
-
-  public final static String CONF_SCRIPTDIR =
-    "test.system.hdrc.deployed.scripts.dir";
-  /**
-   * Key used to configure the HADOOP_CONF_DIR to be used by the
-   * HadoopDaemonRemoteCluster.
-   */
-  public final static String CONF_HADOOPCONFDIR = 
-    "test.system.hdrc.hadoopconfdir";
-
-  public final static String CONF_DEPLOYED_HADOOPCONFDIR =
-    "test.system.hdrc.deployed.hadoopconfdir";
-
-  private String hadoopHome;
-  protected String hadoopConfDir;
-  protected String scriptsDir;
-  protected String hadoopNewConfDir;
-  private final Set<Enum<?>> roles;
-  private final List<HadoopDaemonInfo> daemonInfos;
-  private List<RemoteProcess> processes;
-  protected Configuration conf;
-  
-  public static class HadoopDaemonInfo {
-    public final String cmd;
-    public final Enum<?> role;
-    public final List<String> hostNames;
-    public HadoopDaemonInfo(String cmd, Enum<?> role, List<String> hostNames) {
-      super();
-      this.cmd = cmd;
-      this.role = role;
-      this.hostNames = hostNames;
-    }
-
-    public HadoopDaemonInfo(String cmd, Enum<?> role, String hostFile) 
-        throws IOException {
-      super();
-      this.cmd = cmd;
-      this.role = role;
-      File file = new File(getDeployedHadoopConfDir(), hostFile);
-      BufferedReader reader = null;
-      hostNames = new ArrayList<String>();
-      try {
-        reader = new BufferedReader(new FileReader(file));
-        String host = null;
-        while ((host = reader.readLine()) != null) {
-          if (host.trim().isEmpty() || host.startsWith("#")) {
-            // Skip empty and possible comment lines
-            // throw new IllegalArgumentException(
-            // "Hostname could not be found in file " + hostFile);
-            continue;
-          }
-          hostNames.add(host.trim());
-        }
-        if (hostNames.size() < 1) {
-          throw new IllegalArgumentException("At least one hostname "
-              +
-            "is required to be present in file - " + hostFile);
-        }
-      } finally {
-        try {
-          reader.close();
-        } catch (IOException e) {
-          LOG.warn("Could not close reader");
-        }
-      }
-      LOG.info("Created HadoopDaemonInfo for " + cmd + " " + role + " from " 
-          + hostFile);
-    }
-  }
-
-  @Override
-  public String pushConfig(String localDir) throws IOException {
-    for (RemoteProcess process : processes){
-      process.pushConfig(localDir);
-    }
-    return hadoopNewConfDir;
-  }
-
-  public HadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
-    this.daemonInfos = daemonInfos;
-    this.roles = new HashSet<Enum<?>>();
-    for (HadoopDaemonInfo info : daemonInfos) {
-      this.roles.add(info.role);
-    }
-  }
-
-  @Override
-  public void init(Configuration conf) throws IOException {
-    this.conf = conf;
-    populateDirectories(conf);
-    this.processes = new ArrayList<RemoteProcess>();
-    populateDaemons();
-  }
-
-  @Override
-  public List<RemoteProcess> getAllProcesses() {
-    return processes;
-  }
-
-  @Override
-  public Set<Enum<?>> getRoles() {
-    return roles;
-  }
-
-  /**
-   * Method to populate the hadoop home and hadoop configuration directories.
-   * 
-   * @param conf
-   *          Configuration object containing values for
-   *          CONF_HADOOPHOME and
-   *          CONF_HADOOPCONFDIR
-   * 
-   * @throws IllegalArgumentException
-   *           if the configuration or system property set does not contain
-   *           values for the required keys.
-   */
-  protected void populateDirectories(Configuration conf) {
-    hadoopHome = conf.get(CONF_HADOOPHOME);
-    hadoopConfDir = conf.get(CONF_HADOOPCONFDIR);
-    scriptsDir = conf.get(CONF_SCRIPTDIR);
-    hadoopNewConfDir = conf.get(CONF_HADOOPNEWCONFDIR);
-    if (hadoopHome == null || hadoopConfDir == null || hadoopHome.isEmpty()
-        || hadoopConfDir.isEmpty()) {
-      LOG.error("No configuration "
-          + "for the HADOOP_PREFIX and HADOOP_CONF_DIR passed");
-      throw new IllegalArgumentException(
-          "No Configuration passed for hadoop home " +
-          "and hadoop conf directories");
-    }
-  }
-
-  public static String getDeployedHadoopConfDir() {
-    String dir = System.getProperty(CONF_DEPLOYED_HADOOPCONFDIR);
-    if (dir == null || dir.isEmpty()) {
-      LOG.error("No configuration "
-          + "for the CONF_DEPLOYED_HADOOPCONFDIR passed");
-      throw new IllegalArgumentException(
-          "No Configuration passed for hadoop deployed conf directory");
-    }
-    return dir;
-  }
-
-  @Override
-  public void start() throws IOException {
-    for (RemoteProcess process : processes) {
-      process.start();
-    }
-  }
-
-  @Override
-  public void start(String newConfLocation)throws IOException {
-    for (RemoteProcess process : processes) {
-      process.start(newConfLocation);
-    }
-  }
-
-  @Override
-  public void stop() throws IOException {
-    for (RemoteProcess process : processes) {
-      process.kill();
-    }
-  }
-
-  @Override
-  public void stop(String newConfLocation) throws IOException {
-    for (RemoteProcess process : processes) {
-      process.kill(newConfLocation);
-    }
-  }
-
-  protected void populateDaemon(HadoopDaemonInfo info) throws IOException {
-    for (String host : info.hostNames) {
-      InetAddress addr = InetAddress.getByName(host);
-      RemoteProcess process = getProcessManager(info, 
-          addr.getCanonicalHostName());
-      processes.add(process);
-    }
-  }
-
-  protected void populateDaemons() throws IOException {
-   for (HadoopDaemonInfo info : daemonInfos) {
-     populateDaemon(info);
-   }
-  }
-
-  @Override
-  public boolean isMultiUserSupported() throws IOException {
-    return false;
-  }
-
-  protected RemoteProcess getProcessManager(
-      HadoopDaemonInfo info, String hostName) {
-    RemoteProcess process = new ScriptDaemon(info.cmd, hostName, info.role);
-    return process;
-  }
-
-  /**
-   * The core daemon class which implements the remote process
-   * management of the daemon processes in the cluster.
-   * 
-   */
-  class ScriptDaemon implements RemoteProcess {
-
-    private static final String STOP_COMMAND = "stop";
-    private static final String START_COMMAND = "start";
-    private static final String SCRIPT_NAME = "hadoop-daemon.sh";
-    private static final String PUSH_CONFIG ="pushConfig.sh";
-    protected final String daemonName;
-    protected final String hostName;
-    private final Enum<?> role;
-
-    public ScriptDaemon(String daemonName, String hostName, Enum<?> role) {
-      this.daemonName = daemonName;
-      this.hostName = hostName;
-      this.role = role;
-    }
-
-    @Override
-    public String getHostName() {
-      return hostName;
-    }
-
-    private String[] getPushConfigCommand(String localDir, String remoteDir,
-        File scriptDir) throws IOException{
-      ArrayList<String> cmdArgs = new ArrayList<String>();
-      cmdArgs.add(scriptDir.getAbsolutePath() + File.separator + PUSH_CONFIG);
-      cmdArgs.add(localDir);
-      cmdArgs.add(hostName);
-      cmdArgs.add(remoteDir);
-      cmdArgs.add(hadoopConfDir);
-      return (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
-    }
-
-    private ShellCommandExecutor buildPushConfig(String local, String remote )
-        throws IOException {
-      File scriptDir = new File(scriptsDir);
-      String[] commandArgs = getPushConfigCommand(local, remote, scriptDir);
-      HashMap<String, String> env = new HashMap<String, String>();
-      ShellCommandExecutor executor = new ShellCommandExecutor(commandArgs,
-          scriptDir, env);
-      LOG.info(executor.toString());
-      return executor;
-    }
-
-    private ShellCommandExecutor createNewConfDir() throws IOException {
-      ArrayList<String> cmdArgs = new ArrayList<String>();
-      cmdArgs.add("ssh");
-      cmdArgs.add(hostName);
-      cmdArgs.add("if [ -d "+ hadoopNewConfDir+
-          " ];\n then echo Will remove existing directory;  rm -rf "+
-          hadoopNewConfDir+";\nmkdir "+ hadoopNewConfDir+"; else \n"+
-          "echo " + hadoopNewConfDir + " doesnt exist hence creating" +
-          ";  mkdir " + hadoopNewConfDir + ";\n  fi");
-      String[] cmd = (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
-      ShellCommandExecutor executor = new ShellCommandExecutor(cmd);
-      LOG.info(executor.toString());
-      return executor;
-    }
-
-    @Override
-    public void pushConfig(String localDir) throws IOException {
-      createNewConfDir().execute();
-      buildPushConfig(localDir, hadoopNewConfDir).execute();
-    }
-
-    private ShellCommandExecutor buildCommandExecutor(String command,
-        String confDir) {
-      String[] commandArgs = getCommand(command, confDir);
-      File cwd = new File(".");
-      HashMap<String, String> env = new HashMap<String, String>();
-      env.put("HADOOP_CONF_DIR", confDir);
-      ShellCommandExecutor executor
-        = new ShellCommandExecutor(commandArgs, cwd, env);
-      LOG.info(executor.toString());
-      return executor;
-    }
-
-    private File getBinDir() {
-      File binDir = new File(hadoopHome, "bin");
-      return binDir;
-    }
-
-    protected String[] getCommand(String command, String confDir) {
-      ArrayList<String> cmdArgs = new ArrayList<String>();
-      File binDir = getBinDir();
-      cmdArgs.add("ssh");
-      cmdArgs.add(hostName);
-      cmdArgs.add(binDir.getAbsolutePath() + File.separator + SCRIPT_NAME);
-      cmdArgs.add("--config");
-      cmdArgs.add(confDir);
-      // XXX Twenty internal version does not support --script option.
-      cmdArgs.add(command);
-      cmdArgs.add(daemonName);
-      return (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
-    }
-
-    @Override
-    public void kill() throws IOException {
-      kill(hadoopConfDir);
-    }
-
-    @Override
-    public void start() throws IOException {
-      start(hadoopConfDir);
-    }
-
-    public void start(String newConfLocation) throws IOException {
-      ShellCommandExecutor cme = buildCommandExecutor(START_COMMAND,
-          newConfLocation);
-      cme.execute();
-      String output = cme.getOutput();
-      if (!output.isEmpty()) { //getOutput() never returns null value
-        if (output.toLowerCase().contains("error")) {
-          LOG.warn("Error is detected.");
-          throw new IOException("Start error\n" + output);
-        }
-      }
-    }
-
-    public void kill(String newConfLocation) throws IOException {
-      ShellCommandExecutor cme
-        = buildCommandExecutor(STOP_COMMAND, newConfLocation);
-      cme.execute();
-      String output = cme.getOutput();
-      if (!output.isEmpty()) { //getOutput() never returns null value
-        if (output.toLowerCase().contains("error")) {
-          LOG.info("Error is detected.");
-          throw new IOException("Kill error\n" + output);
-        }
-      }
-    }
-
-    @Override
-    public Enum<?> getRole() {
-      return role;
-    }
-  }
-}
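For context, a sketch of the configuration keys this class reads in populateDirectories(); all paths are placeholders and must point at the deployed cluster.

    // Sketch only: minimal configuration for the hadoop-daemon.sh based cluster.
    Configuration conf = new Configuration();
    conf.set("test.system.hdrc.hadoophome", "/opt/hadoop");
    conf.set("test.system.hdrc.hadoopconfdir", "/opt/hadoop/conf");
    conf.set("test.system.hdrc.hadoopnewconfdir", "/opt/hadoop/conf.new");
    conf.set("test.system.hdrc.deployed.scripts.dir", "/opt/hadoop/test-scripts");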
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java
deleted file mode 100644
index 2f9e215..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version
- * 2.0 (the "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-package org.apache.hadoop.test.system.process;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
-
-public abstract class MultiUserHadoopDaemonRemoteCluster
-    extends HadoopDaemonRemoteCluster {
-
-  public MultiUserHadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
-    super(daemonInfos);
-  }
-
-  @Override
-  protected RemoteProcess getProcessManager(
-      HadoopDaemonInfo info, String hostName) {
-    return new MultiUserScriptDaemon(info.cmd, hostName, info.role);
-  }
-
-  @Override
-  public boolean isMultiUserSupported() throws IOException {
-    return true;
-  }
-
-  class MultiUserScriptDaemon extends ScriptDaemon {
-
-    private static final String MULTI_USER_BINARY_PATH_KEY =
-        "test.system.hdrc.multi-user.binary.path";
-    private static final String MULTI_USER_MANAGING_USER =
-        "test.system.hdrc.multi-user.managinguser.";
-    private String binaryPath;
-    /**
-     * The managing user for a particular daemon is obtained from
-     * MULTI_USER_MANAGING_USER + daemonName.
-     */
-    private String mangingUser;
-
-    public MultiUserScriptDaemon(
-        String daemonName, String hostName, Enum<?> role) {
-      super(daemonName, hostName, role);
-      initialize(daemonName);
-    }
-
-    private void initialize(String daemonName) {
-      binaryPath = conf.get(MULTI_USER_BINARY_PATH_KEY);
-      if (binaryPath == null || binaryPath.trim().isEmpty()) {
-        throw new IllegalArgumentException(
-            "Binary path for multi-user path is not present. Please set "
-                + MULTI_USER_BINARY_PATH_KEY + " correctly");
-      }
-      File binaryFile = new File(binaryPath);
-      if (!binaryFile.exists() || !binaryFile.canExecute()) {
-        throw new IllegalArgumentException(
-            "Binary file path is not configured correctly. Please set "
-                + MULTI_USER_BINARY_PATH_KEY
-                + " to properly configured binary file.");
-      }
-      mangingUser = conf.get(MULTI_USER_MANAGING_USER + daemonName);
-      if (mangingUser == null || mangingUser.trim().isEmpty()) {
-        throw new IllegalArgumentException(
-            "Manging user for daemon not present please set : "
-                + MULTI_USER_MANAGING_USER + daemonName + " to correct value.");
-      }
-    }
-
-    @Override
-    protected String[] getCommand(String command,String confDir) {
-      ArrayList<String> commandList = new ArrayList<String>();
-      commandList.add(binaryPath);
-      commandList.add(mangingUser);
-      commandList.add(hostName);
-      commandList.add("--config "
-          + confDir + " " + command + " " + daemonName);
-      return (String[]) commandList.toArray(new String[commandList.size()]);
-    }
-  }
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java
deleted file mode 100644
index d0afe16..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system.process;
-
-import java.io.IOException;
-
-/**
- * Interface to manage the remote process.
- */
-public interface RemoteProcess {
-  /**
-   * Get the host on which the daemon process is running/stopped.<br/>
-   * 
-   * @return hostname on which process is running/stopped.
-   */
-  String getHostName();
-
-  /**
-   * Start a given daemon process.<br/>
-   * 
-   * @throws IOException if startup fails.
-   */
-  void start() throws IOException;
-  /**
-   * Starts a daemon from user specified conf dir. 
-   * @param newConfLocation is dir where new conf resides. 
-   * @throws IOException if start of process fails from new location.
-   */
-  void start(String newConfLocation) throws IOException;
-  /**
-   * Stop a given daemon process.<br/>
-   * 
-   * @throws IOException if shutdown fails.
-   */
-  void kill() throws IOException;
-  
-  /**
-   * Stops a given daemon running from user specified 
-   * conf dir. <br/>
-   * @param newConfLocation dir location where new conf resides. 
-   * @throws IOException if kill fails from new conf location.
-   */
-   void kill(String newConfLocation) throws IOException;
-  /**
-   * Get the role of the Daemon in the cluster.
-   * 
-   * @return Enum
-   */
-  Enum<?> getRole();
-  
-  /**
-   * Pushes the configuration to the new configuration directory.
-   * @param localDir The local directory which has the config files that will be 
-   * pushed to the remote location.
-   * @throws IOException is thrown if pushConfig results in an error. 
-   */
-  void pushConfig(String localDir) throws IOException;
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/SSHRemoteExecution.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/SSHRemoteExecution.java
deleted file mode 100644
index 704c97d..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/SSHRemoteExecution.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.util;
-
-import com.jcraft.jsch.*;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.util.Properties;
-
-/**
- * Remote execution of commands on a remote machine.
- */
-
-public class SSHRemoteExecution implements RemoteExecution {
-
-  static final Log LOG = LogFactory.getLog(SSHRemoteExecution.class);
-  static final int SSH_PORT = 22;
-  static final String DEFAULT_IDENTITY="id_dsa";
-  static final String DEFAULT_KNOWNHOSTS="known_hosts";
-  static final String FS = System.getProperty("file.separator");
-  static final String LS = System.getProperty("line.separator");
-  private int exitCode;
-  private StringBuffer output;
-  private String commandString;
-
-  final StringBuffer errorMessage = new StringBuffer();
-  public SSHRemoteExecution() throws Exception {
-  }
-
-  protected String getHomeDir() {
-    String currentUser=System.getProperty("user.name");
-    String userHome=System.getProperty("user.home");
-
-    return userHome.substring(0, userHome.indexOf(currentUser)-1);
-  }
-
-  /**
-   * Execute command at remote host under given user
-   * @param remoteHostName remote host name
-   * @param user is the name of the user to log in as;
-   *   current user will be used if this is set to <code>null</code>
-   * @param command to be executed remotely
-   * @param identityFile is the name of alternative identity file; default
-   *   is ~user/.ssh/id_dsa
-   * @param portNumber remote SSH daemon port number, default is 22
-   * @throws Exception in case of errors
-   */
-  public void executeCommand (String remoteHostName, String user,
-          String  command, String identityFile, int portNumber) throws Exception {
-    commandString = command;
-    String sessionUser = System.getProperty("user.name");
-    String userHome=System.getProperty("user.home");
-    if (user != null) {
-      sessionUser = user;
-      userHome = getHomeDir() + FS + user;
-    }
-    String dotSSHDir = userHome + FS + ".ssh";
-    String sessionIdentity = dotSSHDir + FS + DEFAULT_IDENTITY;
-    if (identityFile != null) {
-      sessionIdentity = identityFile;
-    }
-
-    JSch jsch = new JSch();
-
-    Session session = jsch.getSession(sessionUser, remoteHostName, portNumber);
-    jsch.setKnownHosts(dotSSHDir + FS + DEFAULT_KNOWNHOSTS);
-    jsch.addIdentity(sessionIdentity);
-
-    Properties config = new Properties();
-    config.put("StrictHostKeyChecking", "no");
-    session.setConfig(config);
-
-    session.connect(30000);   // making a connection with timeout.
-
-    Channel channel=session.openChannel("exec");
-    ((ChannelExec)channel).setCommand(command);
-    channel.setInputStream(null);
-
-    final BufferedReader errReader =
-            new BufferedReader(
-              new InputStreamReader(((ChannelExec)channel).getErrStream()));
-    BufferedReader inReader =
-            new BufferedReader(new InputStreamReader(channel.getInputStream()));
-
-    channel.connect();
-    Thread errorThread = new Thread() {
-      @Override
-      public void run() {
-        try {
-          String line = errReader.readLine();
-          while((line != null) && !isInterrupted()) {
-            errorMessage.append(line);
-            errorMessage.append(LS);
-            line = errReader.readLine();
-          }
-        } catch(IOException ioe) {
-          LOG.warn("Error reading the error stream", ioe);
-        }
-      }
-    };
-
-    try {
-      errorThread.start();
-    } catch (IllegalStateException e) {
-      LOG.debug(e);
-    }
-    try {
-      parseExecResult(inReader);
-      String line = inReader.readLine();
-      while (line != null) {
-        line = inReader.readLine();
-      }
-
-      if(channel.isClosed()) {
-        exitCode = channel.getExitStatus();
-        LOG.debug("exit-status: " + exitCode);
-      }
-      try {
-        // make sure that the error thread exits
-        errorThread.join();
-      } catch (InterruptedException ie) {
-        LOG.warn("Interrupted while reading the error stream", ie);
-      }
-    } catch (Exception ie) {
-      throw new IOException(ie.toString());
-    }
-    finally {
-      try {
-        inReader.close();
-      } catch (IOException ioe) {
-        LOG.warn("Error while closing the input stream", ioe);
-      }
-      try {
-        errReader.close();
-      } catch (IOException ioe) {
-        LOG.warn("Error while closing the error stream", ioe);
-      }
-      channel.disconnect();
-      session.disconnect();
-    }
-  }
-
-  /**
-   * Execute command at remote host under given username
-   * Default identity is ~/.ssh/id_dsa key will be used
-   * Default known_hosts file is ~/.ssh/known_hosts will be used
-   * @param remoteHostName remote host name
-   * @param user is the name of the user to be login under;
-   *   if equals to <code>null</code> then current user name will be used
-   * @param command to be executed remotely
-   */
-  @Override
-  public void executeCommand (String remoteHostName, String user,
-          String  command) throws Exception {
-    executeCommand(remoteHostName, user, command, null, SSH_PORT);
-  }
-
-  @Override
-  public int getExitCode() {
-    return exitCode;
-  }
-
-  protected void parseExecResult(BufferedReader lines) throws IOException {
-    output = new StringBuffer();
-    char[] buf = new char[512];
-    int nRead;
-    while ( (nRead = lines.read(buf, 0, buf.length)) > 0 ) {
-      output.append(buf, 0, nRead);
-    }
-  }
-
-  /** Get the output of the ssh command.*/
-  @Override
-  public String getOutput() {
-    return (output == null) ? "" : output.toString();
-  }
-
-  /** Get the String representation of ssh command */
-  @Override
-  public String getCommandString() {
-    return commandString;
-  }
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/system/scripts/pushConfig.sh b/hadoop-common-project/hadoop-common/src/test/system/scripts/pushConfig.sh
deleted file mode 100644
index 1230f0e..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/scripts/pushConfig.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# local folder with new configuration file
-LOCAL_DIR=$1
-# remote daemon host
-HOST=$2
-#remote dir points to the location of new config files
-REMOTE_DIR=$3
-# remote daemon HADOOP_CONF_DIR location
-DAEMON_HADOOP_CONF_DIR=$4
-
-if [ $# -ne 4 ]; then
-  echo "Wrong number of parameters" >&2
-  exit 2
-fi
-
-ret_value=0
-
-echo The script makes a remote copy of existing ${DAEMON_HADOOP_CONF_DIR} to ${REMOTE_DIR}
-echo and populates it with new configs prepared in $LOCAL_DIR
-
-ssh ${HOST} cp -r ${DAEMON_HADOOP_CONF_DIR}/* ${REMOTE_DIR}
-ret_value=$?
-
-# make sure files are writeble
-ssh ${HOST} chmod u+w ${REMOTE_DIR}/*
-
-# copy new files over
-scp -r ${LOCAL_DIR}/* ${HOST}:${REMOTE_DIR}
-
-err_code=`echo $? + $ret_value | bc`
-echo Copying of files from local to remote returned ${err_code}
-
diff --git a/hadoop-common-project/hadoop-common/src/test/system/validation/org/apache/hadoop/util/TestSSHRemoteExecution.java b/hadoop-common-project/hadoop-common/src/test/system/validation/org/apache/hadoop/util/TestSSHRemoteExecution.java
deleted file mode 100644
index 15eb00c..0000000
--- a/hadoop-common-project/hadoop-common/src/test/system/validation/org/apache/hadoop/util/TestSSHRemoteExecution.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.util;
-
-import static org.junit.Assert.assertEquals;
-import org.junit.Test;
-
-public class TestSSHRemoteExecution {
-  
-  @Test
-  /**
-   * Method: executeCommand(String remoteHostName, String user, String  command)
-   */
-  public void testExecuteCommandForRemoteHostNameUserCommand() throws Exception {
-    String command = "ls -l /bin";
-    SSHRemoteExecution sshRE = new SSHRemoteExecution();
-    sshRE.executeCommand("localhost", null, "ls -l /bin");
-    System.out.println(sshRE.getOutput());
-    assertEquals("Exit code should is expected to be 0", sshRE.getExitCode(), 0);
-    assertEquals("Mismatched command string", sshRE.getCommandString(), command);
-  }
-
-  @Test
-  /**
-   * Method: getHomeDir()
-   */
-  public void testGetHomeDir() throws Exception {
-    SSHRemoteExecution sshRE = new SSHRemoteExecution();
-    String ret = sshRE.getHomeDir();
-    assertEquals(System.getProperty("user.home"),
-      ret + System.getProperty("file.separator") +
-        System.getProperty("user.name"));
-  }
-}
diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml
index 57112b7..2ee82dd 100644
--- a/hadoop-common-project/pom.xml
+++ b/hadoop-common-project/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 1c8bf83..7fc6e56 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index 7b07956..b40044a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -14,7 +14,10 @@
 
 
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
@@ -474,7 +477,7 @@
                     <!-- Using Unix script to preserve file permissions -->
                     <echo file="${project.build.directory}/tomcat-untar.sh">
 
-                      which cygpath 2> /dev/null
+                      which cygpath 2&gt; /dev/null
                       if [ $? = 1 ]; then
                       BUILD_DIR="${project.build.directory}"
                       else
@@ -517,7 +520,7 @@
                     <!-- Using Unix script to preserve symlinks -->
                     <echo file="${project.build.directory}/dist-maketar.sh">
 
-                      which cygpath 2> /dev/null
+                      which cygpath 2&gt; /dev/null
                       if [ $? = 1 ]; then
                       BUILD_DIR="${project.build.directory}"
                       else
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 4b3b587..9e51ce8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -288,10 +288,10 @@
     @Override
     public Void execute(FileSystem fs) throws IOException {
       if (replication == -1) {
-        replication = fs.getDefaultReplication();
+        replication = fs.getDefaultReplication(path);
       }
       if (blockSize == -1) {
-        blockSize = fs.getDefaultBlockSize();
+        blockSize = fs.getDefaultBlockSize(path);
       }
       FsPermission fsPermission = getPermission(permission);
       int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
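
A minimal sketch, not part of this patch, of why the per-path overloads used above matter: on file systems whose defaults can vary by path (for example viewfs mount points), getDefaultReplication(Path) and getDefaultBlockSize(Path) resolve the defaults for the target path, whereas the no-argument variants only reflect the client configuration. The path below is an illustrative assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DefaultsPerPathSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/user/example/data.txt");   // hypothetical path
        // Path-agnostic defaults, taken from the client-side configuration only.
        short clientRepl = fs.getDefaultReplication();
        long clientBlock = fs.getDefaultBlockSize();
        // Path-aware defaults, which the patched create() operation now uses.
        short repl = fs.getDefaultReplication(p);
        long block = fs.getDefaultBlockSize(p);
        System.out.println(repl + "/" + clientRepl + ", " + block + "/" + clientBlock);
      }
    }
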
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index 95c4942..cf90485 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
 import org.apache.hadoop.lib.servlet.HostnameFilter;
 import org.apache.hadoop.lib.wsrs.InputStreamEntity;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
 import org.json.simple.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -145,9 +146,15 @@
     String effectiveUser = user.getName();
     if (doAs != null && !doAs.equals(user.getName())) {
       ProxyUser proxyUser = HttpFSServerWebApp.get().get(ProxyUser.class);
-      proxyUser.validate(user.getName(), HostnameFilter.get(), doAs);
+      String proxyUserName;
+      if (user instanceof AuthenticationToken) {
+        proxyUserName = ((AuthenticationToken)user).getUserName();
+      } else {
+        proxyUserName = user.getName();
+      }
+      proxyUser.validate(proxyUserName, HostnameFilter.get(), doAs);
       effectiveUser = doAs;
-      AUDIT_LOG.info("Proxy user [{}] DoAs user [{}]", user.getName(), doAs);
+      AUDIT_LOG.info("Proxy user [{}] DoAs user [{}]", proxyUserName, doAs);
     }
     return effectiveUser;
   }
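
A minimal sketch, not part of this patch, of the name-resolution logic added above (HDFS-3460): with Kerberos enabled, the Principal's getName() can be a full principal such as user/host@REALM, while AuthenticationToken#getUserName() yields the short name that proxy-user validation expects.

    import java.security.Principal;
    import org.apache.hadoop.security.authentication.server.AuthenticationToken;

    final class ProxyUserNameSketch {
      // Returns the short user name to pass to proxy-user validation.
      static String resolve(Principal user) {
        if (user instanceof AuthenticationToken) {
          return ((AuthenticationToken) user).getUserName();
        }
        return user.getName();
      }
    }
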
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a5f068f..cf0fb48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -13,6 +13,11 @@
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
 
+    HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)
+
+    HDFS-3042. Automatic failover support for NameNode HA (todd)
+    (see dedicated section below for subtask breakdown)
+
   IMPROVEMENTS
 
     HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
@@ -77,6 +82,14 @@
     HDFS-3293. Add toString(), equals(..) and hashCode() to JournalInfo.
     (Hari Mankude via szetszwo)
 
+    HDFS-3197. Incorrect class comments in a few tests. (Andy Isaacson via eli)
+
+    HDFS-2391. Newly set BalancerBandwidth value is not displayed anywhere.
+    (harsh)
+
+    HDFS-3476. Correct the default used in TestDFSClientRetries.busyTest()
+    after HDFS-3462 (harsh)
+
   OPTIMIZATIONS
 
     HDFS-2834. Add a ByteBuffer-based read API to DFSInputStream.
@@ -109,9 +122,6 @@
     HDFS-2776. Missing interface annotation on JournalSet. 
     (Brandon Li via jitendra)
 
-    HDFS-2759. Pre-allocate HDFS edit log files after writing version number.
-    (atm)
-
     HDFS-2908. Add apache license header for StorageReport.java. (Brandon Li
     via jitendra)
 
@@ -141,8 +151,163 @@
     HDFS-3243. TestParallelRead timing out on jenkins. (Henry Robinson via todd)
 
     HDFS-3265. PowerPc Build error. (Kumar Ravi via mattf)
+
+    HDFS-2312. FSNamesystem javadoc incorrectly says its for DNs. (harsh)
+
+    HDFS-3163. TestHDFSCLI.testAll fails if the user name is not all lowercase.
+    (Brandon Li via atm)
+
+    HDFS-3368. Missing blocks due to bad DataNodes coming up and down. (shv)
+
+    HDFS-3462. TestDFSClientRetries.busyTest() should restore default
+    xceiver count in the config. (Madhukara Phatak via harsh)
+
+  BREAKDOWN OF HDFS-3042 SUBTASKS
+
+    HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
     
-Release 2.0.0 - UNRELEASED 
+    HDFS-3200. Scope all ZKFC configurations by nameservice (todd)
+    
+    HDFS-3223. add zkfc to hadoop-daemon.sh script (todd)
+    
+    HDFS-3261. TestHASafeMode fails on HDFS-3042 branch (todd)
+    
+    HDFS-3159. Document NN auto-failover setup and configuration (todd)
+    
+    HDFS-3412. Fix findbugs warnings in auto-HA branch (todd)
+    
+    HDFS-3432. TestDFSZKFailoverController tries to fail over too early (todd)
+
+Release 2.0.1-alpha - UNRELEASED
+  
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
+    logging is enabled. (atm)
+
+    HDFS-3341. Change minimum RPC versions to respective SNAPSHOTs instead of
+    final releases. (todd)
+
+    HDFS-3369. Rename {get|set|add}INode(..) methods in BlockManager and
+    BlocksMap to {get|set|add}BlockCollection(..).  (John George via szetszwo)
+
+    HDFS-3134. harden edit log loader against malformed or malicious input.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
+
+    HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. (eli)
+
+    HDFS-3400. DNs should be able start with jsvc even if security is disabled.
+    (atm via eli)
+
+    HDFS-3404. Make putImage in GetImageServlet infer remote address to fetch
+    from request. (atm)
+
+    HDFS-3335. check for edit log corruption at the end of the log
+    (Colin Patrick McCabe via todd)
+
+    HDFS-3417. Rename BalancerDatanode#getName to getDisplayName to be
+    consistent with Datanode. (eli)
+
+    HDFS-3416. Cleanup DatanodeID and DatanodeRegistration
+    constructors used by testing. (eli)
+
+    HDFS-3419. Cleanup LocatedBlock. (eli)
+
+    HDFS-3440. More effectively limit stream memory consumption when reading
+    corrupt edit logs (Colin Patrick McCabe via todd)
+
+    HDFS-3438. BootstrapStandby should not require a rollEdits on active node
+    (todd)
+
+    HDFS-2885. Remove "federation" from the nameservice config options.
+    (Tsz Wo (Nicholas) Sze via eli)
+
+    HDFS-3394. Do not use generic in INodeFile.getLastBlock(): the run-time
+    ClassCastException check is useless since generic type information is only
+    available in compile-time.  (szetszwo)
+
+    HDFS-3454. Balancer unconditionally logs InterruptedException at
+    INFO level on shutdown if security is enabled. (eli)
+
+    HDFS-1013. Miscellaneous improvements to HTML markup for web UIs
+    (Eugene Koontz via todd)
+
+  OPTIMIZATIONS
+
+    HDFS-2982. Startup performance suffers when there are many edit log
+    segments. (Colin Patrick McCabe via todd)
+
+  BUG FIXES
+
+    HDFS-3385. The last block of INodeFileUnderConstruction is not
+    necessarily a BlockInfoUnderConstruction, so do not cast it in
+    FSNamesystem.recoverLeaseInternal(..).  (szetszwo)
+
+    HDFS-3414. Balancer does not find NameNode if rpc-address or
+    servicerpc-address are not set in client configs. (atm)
+
+    HDFS-3031. Fix complete() and getAdditionalBlock() RPCs to be idempotent
+    (todd)
+
+    HDFS-2759. Pre-allocate HDFS edit log files after writing version number.
+    (atm)
+
+    HDFS-3413. TestFailureToReadEdits timing out. (atm)
+
+    HDFS-3422. TestStandbyIsHot timeouts too aggressive (todd)
+
+    HDFS-3433. GetImageServlet should allow administrative requestors when
+    security is enabled. (atm)
+
+    HDFS-1153. dfsnodelist.jsp should handle invalid input parameters.
+    (Ravi Phulari via eli)
+
+    HDFS-3434. InvalidProtocolBufferException when visiting DN
+    browseDirectory.jsp (eli)
+
+    HDFS-2800. Fix cancellation of checkpoints in the standby node to be more
+    reliable. (todd)
+
+    HDFS-3391. Fix InvalidateBlocks to compare blocks including their
+    generation stamps. (todd)
+
+    HDFS-3444. hdfs groups command doesn't work with security enabled. (atm)
+
+    HDFS-3415. Make sure all layout versions are the same for all storage
+    directories in the Namenode.  (Brandon Li via szetszwo)
+
+    HDFS-3436. In DataNode.transferReplicaForPipelineRecovery(..), it should
+    use the stored generation stamp to check if the block is valid.  (Vinay
+    via szetszwo)
+
+    HDFS-3460. HttpFS proxyuser validation with Kerberos ON uses full 
+    principal name. (tucu)
+
+    HDFS-3484. hdfs fsck doesn't work if NN HTTP address is set to
+    0.0.0.0 even if NN RPC address is configured. (atm via eli)
+
+    HDFS-3486. offlineimageviewer can't read fsimage files that contain
+    persistent delegation tokens. (Colin Patrick McCabe via eli)
+
+    HDFS-3487. offlineimageviewer should give byte offset information
+    when it encounters an exception. (Colin Patrick McCabe via eli)
+
+    HDFS-3442. Incorrect count for Missing Replicas in FSCK report. (Andrew
+    Wang via atm)
+
+    HDFS-3501. Checkpointing with security enabled will stop working
+    after ticket lifetime expires. (atm via eli)
+
+    HDFS-3266. DFSTestUtil#waitCorruptReplicas doesn't sleep between checks.
+    (Madhukara Phatak via atm)
+
+Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES
 
@@ -368,9 +533,6 @@
     HDFS-2505. Add a test to verify getFileChecksum(..) with ViewFS.  (Ravi
     Prakash via szetszwo)
 
-    HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo
-    and epoch in JournalProtocol. (suresh via szetszwo)
-
     HDFS-3240. Drop log level of "heartbeat: ..." in BPServiceActor to DEBUG
     (todd)
 
@@ -435,21 +597,11 @@
     so that INodeFile and INodeFileUnderConstruction do not have to be used in
     block management.  (John George via szetszwo)
 
-    HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
-    logging is enabled. (atm)
+    HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo
+    and epoch in JournalProtocol. (suresh via szetszwo)
 
-    HDFS-3341. Change minimum RPC versions to respective SNAPSHOTs instead of
-    final releases. (todd)
-
-    HDFS-3369. Rename {get|set|add}INode(..) methods in BlockManager and
-    BlocksMap to {get|set|add}BlockCollection(..).  (John George via szetszwo)
-
-    HDFS-3134. harden edit log loader against malformed or malicious input.
-    (Colin Patrick McCabe via eli)
-
-    HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
-
-    HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. (eli)
+    HDFS-3418. Rename BlockWithLocationsProto datanodeIDs field to storageIDs.
+    (eli)
 
   OPTIMIZATIONS
 
@@ -644,9 +796,10 @@
     HDFS-3395. NN doesn't start with HA+security enabled and HTTP address
     set to 0.0.0.0. (atm)
 
-    HDFS-3385. The last block of INodeFileUnderConstruction is not
-    necessarily a BlockInfoUnderConstruction, so do not cast it in
-    FSNamesystem.recoverLeaseInternal(..).  (szetszwo)
+    HDFS-3026. HA: Handle failure during HA state transition. (atm)
+
+    HDFS-860. fuse-dfs truncate behavior causes issues with scp.
+    (Brian Bockelman via eli)
 
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 31a38c7..3461d6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -6,6 +6,9 @@
        <Package name="org.apache.hadoop.hdfs.protocol.proto" />
      </Match>
      <Match>
+       <Package name="org.apache.hadoop.hdfs.server.namenode.ha.proto" />
+     </Match>
+     <Match>
        <Bug pattern="EI_EXPOSE_REP" />
      </Match>
      <Match>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 6717c49..a0878df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
@@ -100,6 +103,33 @@
       <artifactId>ant</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <version>3.4.2</version>
+      <exclusions>
+        <exclusion>
+          <!-- otherwise seems to drag in junit 3.8.1 via jline -->
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jdmk</groupId>
+          <artifactId>jmxtools</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jmx</groupId>
+          <artifactId>jmxri</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <version>3.4.2</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>
@@ -247,7 +277,7 @@
                 <echo file="target/compile-proto.sh">
                     PROTO_DIR=src/main/proto
                     JAVA_DIR=target/generated-sources/java
-                    which cygpath 2> /dev/null
+                    which cygpath 2&gt; /dev/null
                     if [ $? = 1 ]; then
                       IS_WIN=false
                     else
@@ -255,8 +285,8 @@
                       WIN_PROTO_DIR=`cygpath --windows $PROTO_DIR`
                       WIN_JAVA_DIR=`cygpath --windows $JAVA_DIR`
                     fi
-                    mkdir -p $JAVA_DIR 2> /dev/null
-                    for PROTO_FILE in `ls $PROTO_DIR/*.proto 2> /dev/null`
+                    mkdir -p $JAVA_DIR 2&gt; /dev/null
+                    for PROTO_FILE in `ls $PROTO_DIR/*.proto 2&gt; /dev/null`
                     do
                         if [ "$IS_WIN" = "true" ]; then
                           protoc -I$WIN_PROTO_DIR --java_out=$WIN_JAVA_DIR $PROTO_FILE
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
index 380ef62..94ce6bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
@@ -55,6 +58,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.bookkeeper</groupId>
       <artifactId>bookkeeper-server</artifactId>
       <scope>compile</scope>
@@ -64,6 +73,11 @@
       <artifactId>junit</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
   <profiles>
     <profile>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
index 9d070d9..2374cd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader;
 import org.apache.bookkeeper.client.LedgerHandle;
 import org.apache.bookkeeper.client.LedgerEntry;
+import org.apache.bookkeeper.client.BKException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -41,6 +42,7 @@
   private final long firstTxId;
   private final long lastTxId;
   private final int logVersion;
+  private final boolean inProgress;
   private final LedgerHandle lh;
 
   private final FSEditLogOp.Reader reader;
@@ -69,22 +71,28 @@
     this.firstTxId = metadata.getFirstTxId();
     this.lastTxId = metadata.getLastTxId();
     this.logVersion = metadata.getVersion();
+    this.inProgress = metadata.isInProgress();
 
+    if (firstBookKeeperEntry < 0
+        || firstBookKeeperEntry > lh.getLastAddConfirmed()) {
+      throw new IOException("Invalid first bk entry to read: "
+          + firstBookKeeperEntry + ", LAC: " + lh.getLastAddConfirmed());
+    }
     BufferedInputStream bin = new BufferedInputStream(
         new LedgerInputStream(lh, firstBookKeeperEntry));
     tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);
     DataInputStream in = new DataInputStream(tracker);
 
-    reader = new FSEditLogOp.Reader(in, logVersion);
+    reader = new FSEditLogOp.Reader(in, tracker, logVersion);
   }
 
   @Override
-  public long getFirstTxId() throws IOException {
+  public long getFirstTxId() {
     return firstTxId;
   }
 
   @Override
-  public long getLastTxId() throws IOException {
+  public long getLastTxId() {
     return lastTxId;
   }
   
@@ -102,8 +110,10 @@
   public void close() throws IOException {
     try {
       lh.close();
-    } catch (Exception e) {
+    } catch (BKException e) {
       throw new IOException("Exception closing ledger", e);
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted closing ledger", e);
     }
   }
 
@@ -123,10 +133,28 @@
         lh.toString(), firstTxId, lastTxId);
   }
 
-  // TODO(HA): Test this.
   @Override
   public boolean isInProgress() {
-    return true;
+    return inProgress;
+  }
+
+  /**
+   * Skip forward to specified transaction id.
+   * Currently we do this by just iterating forward.
+   * If this proves to be too expensive, this can be reimplemented
+   * with a binary search over bk entries
+   */
+  public void skipTo(long txId) throws IOException {
+    long numToSkip = txId - getFirstTxId();
+
+    FSEditLogOp op = null;
+    for (long i = 0; i < numToSkip; i++) {
+      op = readOp();
+    }
+    if (op != null && op.getTransactionId() != txId-1) {
+      throw new IOException("Corrupt stream, expected txid "
+          + (txId-1) + ", got " + op.getTransactionId());
+    }
   }
 
   /**
@@ -148,11 +176,8 @@
         throws IOException {
       this.lh = lh;
       readEntries = firstBookKeeperEntry;
-      try {
-        maxEntry = lh.getLastAddConfirmed();
-      } catch (Exception e) {
-        throw new IOException("Error reading last entry id", e);
-      }
+
+      maxEntry = lh.getLastAddConfirmed();
     }
 
     /**
@@ -173,8 +198,10 @@
             assert !entries.hasMoreElements();
             return e.getEntryInputStream();
         }
-      } catch (Exception e) {
+      } catch (BKException e) {
         throw new IOException("Error reading entries from bookkeeper", e);
+      } catch (InterruptedException e) {
+        throw new IOException("Interrupted reading entries from bookkeeper", e);
       }
       return null;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java
index ddbe0b6..6267871 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java
@@ -33,6 +33,9 @@
 import org.apache.hadoop.io.DataOutputBuffer;
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 /**
  * Output stream for BookKeeper Journal.
  * Multiple complete edit log entries are packed into a single bookkeeper
@@ -44,20 +47,22 @@
  */
 class BookKeeperEditLogOutputStream
   extends EditLogOutputStream implements AddCallback {
+  static final Log LOG = LogFactory.getLog(BookKeeperEditLogOutputStream.class);
+
   private final DataOutputBuffer bufCurrent;
   private final AtomicInteger outstandingRequests;
   private final int transmissionThreshold;
   private final LedgerHandle lh;
   private CountDownLatch syncLatch;
-  private final WriteLock wl;
+  private final AtomicInteger transmitResult
+    = new AtomicInteger(BKException.Code.OK);
   private final Writer writer;
 
   /**
    * Construct an edit log output stream which writes to a ledger.
 
    */
-  protected BookKeeperEditLogOutputStream(Configuration conf,
-                                          LedgerHandle lh, WriteLock wl)
+  protected BookKeeperEditLogOutputStream(Configuration conf, LedgerHandle lh)
       throws IOException {
     super();
 
@@ -65,8 +70,6 @@
     outstandingRequests = new AtomicInteger(0);
     syncLatch = null;
     this.lh = lh;
-    this.wl = wl;
-    this.wl.acquire();
     this.writer = new Writer(bufCurrent);
     this.transmissionThreshold
       = conf.getInt(BookKeeperJournalManager.BKJM_OUTPUT_BUFFER_SIZE,
@@ -101,7 +104,6 @@
       throw new IOException("BookKeeper error during abort", bke);
     }
 
-    wl.release();
   }
 
   @Override
@@ -111,8 +113,6 @@
 
   @Override
   public void write(FSEditLogOp op) throws IOException {
-    wl.checkWriteLock();
-
     writer.writeOp(op);
 
     if (bufCurrent.getLength() > transmissionThreshold) {
@@ -122,25 +122,26 @@
 
   @Override
   public void setReadyToFlush() throws IOException {
-    wl.checkWriteLock();
-
     transmit();
 
-    synchronized(this) {
+    synchronized (this) {
       syncLatch = new CountDownLatch(outstandingRequests.get());
     }
   }
 
   @Override
   public void flushAndSync() throws IOException {
-    wl.checkWriteLock();
-
     assert(syncLatch != null);
     try {
       syncLatch.await();
     } catch (InterruptedException ie) {
       throw new IOException("Interrupted waiting on latch", ie);
     }
+    if (transmitResult.get() != BKException.Code.OK) {
+      throw new IOException("Failed to write to bookkeeper; Error is ("
+                            + transmitResult.get() + ") "
+                            + BKException.getMessage(transmitResult.get()));
+    }
 
     syncLatch = null;
     // wait for whatever we wait on
@@ -152,8 +153,12 @@
    * are never called at the same time.
    */
   private void transmit() throws IOException {
-    wl.checkWriteLock();
-
+    if (!transmitResult.compareAndSet(BKException.Code.OK,
+                                     BKException.Code.OK)) {
+      throw new IOException("Trying to write to an errored stream;"
+          + " Error code : (" + transmitResult.get()
+          + ") " + BKException.getMessage(transmitResult.get()));
+    }
     if (bufCurrent.getLength() > 0) {
       byte[] entry = Arrays.copyOf(bufCurrent.getData(),
                                    bufCurrent.getLength());
@@ -168,6 +173,12 @@
                           long entryId, Object ctx) {
     synchronized(this) {
       outstandingRequests.decrementAndGet();
+      if (!transmitResult.compareAndSet(BKException.Code.OK, rc)) {
+        LOG.warn("Tried to set transmit result to (" + rc + ") \""
+            + BKException.getMessage(rc) + "\""
+            + " but is already (" + transmitResult.get() + ") \""
+            + BKException.getMessage(transmitResult.get()) + "\"");
+      }
       CountDownLatch l = syncLatch;
       if (l != null) {
         l.countDown();
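
A minimal sketch, not part of this patch, of the error-latching pattern the output stream now uses in place of the removed WriteLock: an AtomicInteger starts at the OK code, compareAndSet records only the first failing asynchronous completion, and later writes check it and fail fast. The codes below are placeholders, not BKException codes.

    import java.util.concurrent.atomic.AtomicInteger;

    class FirstErrorLatch {
      static final int OK = 0;
      private final AtomicInteger result = new AtomicInteger(OK);

      // Record a completion code; only the first non-OK code is retained.
      void complete(int rc) {
        result.compareAndSet(OK, rc);
      }

      // Throw if any earlier asynchronous completion failed.
      void checkOk() {
        if (result.get() != OK) {
          throw new IllegalStateException("stream errored, code " + result.get());
        }
      }
    }
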
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
index 047efd5..5317a0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
@@ -37,6 +37,7 @@
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs.Ids;
 
+import java.util.Collection;
 import java.util.Collections;
 import java.util.ArrayList;
 import java.util.List;
@@ -48,7 +49,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
+import com.google.common.annotations.VisibleForTesting;
 /**
  * BookKeeper Journal Manager
  *
@@ -61,7 +62,7 @@
  * </property>
  *
  * <property>
- *   <name>dfs.namenode.edits.journalPlugin.bookkeeper</name>
+ *   <name>dfs.namenode.edits.journal-plugin.bookkeeper</name>
  *   <value>org.apache.hadoop.contrib.bkjournal.BookKeeperJournalManager</value>
  * </property>
  * }
@@ -90,6 +91,10 @@
  *       Default is 2.</li>
  *   <li><b>dfs.namenode.bookkeeperjournal.digestPw</b>
  *       Password to use when creating ledgers. </li>
+ *   <li><b>dfs.namenode.bookkeeperjournal.zk.session.timeout</b>
+ *       Session timeout for the ZooKeeper client used by the BookKeeper
+ *       Journal Manager. It is recommended that this value be less than the
+ *       ZKFC session timeout. Default value is 3000.</li>
  * </ul>
  */
 public class BookKeeperJournalManager implements JournalManager {
@@ -112,11 +117,17 @@
   public static final String BKJM_BOOKKEEPER_DIGEST_PW_DEFAULT = "";
 
   private static final int BKJM_LAYOUT_VERSION = -1;
+  
+  public static final String BKJM_ZK_SESSION_TIMEOUT 
+    = "dfs.namenode.bookkeeperjournal.zk.session.timeout";
+  public static final int BKJM_ZK_SESSION_TIMEOUT_DEFAULT = 3000;
 
-  private final ZooKeeper zkc;
+  private static final String BKJM_EDIT_INPROGRESS = "inprogress_";
+
+  private ZooKeeper zkc;
   private final Configuration conf;
   private final BookKeeper bkc;
-  private final WriteLock wl;
+  private final CurrentInprogress ci;
   private final String ledgerPath;
   private final MaxTxId maxTxId;
   private final int ensembleSize;
@@ -154,14 +165,15 @@
 
     ledgerPath = zkPath + "/ledgers";
     String maxTxIdPath = zkPath + "/maxtxid";
-    String lockPath = zkPath + "/lock";
+    String currentInprogressNodePath = zkPath + "/CurrentInprogress";
     String versionPath = zkPath + "/version";
     digestpw = conf.get(BKJM_BOOKKEEPER_DIGEST_PW,
                         BKJM_BOOKKEEPER_DIGEST_PW_DEFAULT);
 
     try {
       zkConnectLatch = new CountDownLatch(1);
-      zkc = new ZooKeeper(zkConnect, 3000, new ZkConnectionWatcher());
+      zkc = new ZooKeeper(zkConnect, conf.getInt(BKJM_ZK_SESSION_TIMEOUT,
+          BKJM_ZK_SESSION_TIMEOUT_DEFAULT), new ZkConnectionWatcher());
       if (!zkConnectLatch.await(6000, TimeUnit.MILLISECONDS)) {
         throw new IOException("Error connecting to zookeeper");
       }
@@ -187,11 +199,14 @@
 
       bkc = new BookKeeper(new ClientConfiguration(),
                            zkc);
-    } catch (Exception e) {
+    } catch (KeeperException e) {
       throw new IOException("Error initializing zk", e);
+    } catch (InterruptedException ie) {
+      throw new IOException("Interrupted while initializing bk journal manager",
+                            ie);
     }
 
-    wl = new WriteLock(zkc, lockPath);
+    ci = new CurrentInprogress(zkc, currentInprogressNodePath);
     maxTxId = new MaxTxId(zkc, maxTxIdPath);
   }
 
@@ -206,21 +221,34 @@
    */
   @Override
   public EditLogOutputStream startLogSegment(long txId) throws IOException {
-    wl.acquire();
-
     if (txId <= maxTxId.get()) {
       throw new IOException("We've already seen " + txId
           + ". A new stream cannot be created with it");
     }
-    if (currentLedger != null) {
-      throw new IOException("Already writing to a ledger, id="
-                            + currentLedger.getId());
-    }
+
     try {
+      String existingInprogressNode = ci.read();
+      if (null != existingInprogressNode
+          && zkc.exists(existingInprogressNode, false) != null) {
+        throw new IOException("Inprogress node already exists");
+      }
+      if (currentLedger != null) {
+        // bookkeeper errored on last stream, clean up ledger
+        currentLedger.close();
+      }
       currentLedger = bkc.createLedger(ensembleSize, quorumSize,
                                        BookKeeper.DigestType.MAC,
                                        digestpw.getBytes());
-      String znodePath = inprogressZNode();
+    } catch (BKException bke) {
+      throw new IOException("Error creating ledger", bke);
+    } catch (KeeperException ke) {
+      throw new IOException("Error in zookeeper while creating ledger", ke);
+    } catch (InterruptedException ie) {
+      throw new IOException("Interrupted creating ledger", ie);
+    }
+
+    try {
+      String znodePath = inprogressZNode(txId);
       EditLogLedgerMetadata l = new EditLogLedgerMetadata(znodePath,
           HdfsConstants.LAYOUT_VERSION,  currentLedger.getId(), txId);
       /* Write the ledger metadata out to the inprogress ledger znode
@@ -232,20 +260,30 @@
        */
       l.write(zkc, znodePath);
 
-      return new BookKeeperEditLogOutputStream(conf, currentLedger, wl);
-    } catch (Exception e) {
-      if (currentLedger != null) {
-        try {
-          currentLedger.close();
-        } catch (Exception e2) {
-          //log & ignore, an IOException will be thrown soon
-          LOG.error("Error closing ledger", e2);
-        }
-      }
-      throw new IOException("Error creating ledger", e);
+      maxTxId.store(txId);
+      ci.update(znodePath);
+      return new BookKeeperEditLogOutputStream(conf, currentLedger);
+    } catch (KeeperException ke) {
+      cleanupLedger(currentLedger);
+      throw new IOException("Error storing ledger metadata", ke);
     }
   }
 
+  private void cleanupLedger(LedgerHandle lh) {
+    try {
+      long id = lh.getId();
+      lh.close();
+      bkc.deleteLedger(id);
+    } catch (BKException bke) {
+      //log & ignore, an IOException will be thrown soon
+      LOG.error("Error closing ledger", bke);
+    } catch (InterruptedException ie) {
+      LOG.warn("Interrupted while closing ledger", ie);
+    }
+  }
+
+
+
   /**
    * Finalize a log segment. If the journal manager is currently
    * writing to a ledger, ensure that this is the ledger of the log segment
@@ -258,7 +296,7 @@
   @Override
   public void finalizeLogSegment(long firstTxId, long lastTxId)
       throws IOException {
-    String inprogressPath = inprogressZNode();
+    String inprogressPath = inprogressZNode(firstTxId);
     try {
       Stat inprogressStat = zkc.exists(inprogressPath, false);
       if (inprogressStat == null) {
@@ -266,7 +304,6 @@
                               + " doesn't exist");
       }
 
-      wl.checkWriteLock();
       EditLogLedgerMetadata l
         =  EditLogLedgerMetadata.read(zkc, inprogressPath);
 
@@ -303,55 +340,94 @@
       }
       maxTxId.store(lastTxId);
       zkc.delete(inprogressPath, inprogressStat.getVersion());
+      String inprogressPathFromCI = ci.read();
+      if (inprogressPath.equals(inprogressPathFromCI)) {
+        ci.clear();
+      }
     } catch (KeeperException e) {
       throw new IOException("Error finalising ledger", e);
     } catch (InterruptedException ie) {
       throw new IOException("Error finalising ledger", ie);
-    } finally {
-      wl.release();
-    }
+    } 
   }
 
-  // TODO(HA): Handle inProgressOk
-  @Override
-  public EditLogInputStream getInputStream(long fromTxnId, boolean inProgressOk)
+  EditLogInputStream getInputStream(long fromTxId, boolean inProgressOk)
       throws IOException {
-    for (EditLogLedgerMetadata l : getLedgerList()) {
-      if (l.getFirstTxId() == fromTxnId) {
+    for (EditLogLedgerMetadata l : getLedgerList(inProgressOk)) {
+      long lastTxId = l.getLastTxId();
+      if (l.isInProgress()) {
+        lastTxId = recoverLastTxId(l, false);
+      }
+
+      if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
         try {
-          LedgerHandle h = bkc.openLedger(l.getLedgerId(),
-                                          BookKeeper.DigestType.MAC,
-                                          digestpw.getBytes());
-          return new BookKeeperEditLogInputStream(h, l);
-        } catch (Exception e) {
-          throw new IOException("Could not open ledger for " + fromTxnId, e);
+          LedgerHandle h;
+          if (l.isInProgress()) { // we don't want to fence the current journal
+            h = bkc.openLedgerNoRecovery(l.getLedgerId(),
+                BookKeeper.DigestType.MAC, digestpw.getBytes());
+          } else {
+            h = bkc.openLedger(l.getLedgerId(), BookKeeper.DigestType.MAC,
+                digestpw.getBytes());
+          }
+          BookKeeperEditLogInputStream s = new BookKeeperEditLogInputStream(h,
+              l);
+          s.skipTo(fromTxId);
+          return s;
+        } catch (BKException e) {
+          throw new IOException("Could not open ledger for " + fromTxId, e);
+        } catch (InterruptedException ie) {
+          throw new IOException("Interrupted opening ledger for "
+                                         + fromTxId, ie);
         }
       }
     }
-    throw new IOException("No ledger for fromTxnId " + fromTxnId + " found.");
+    return null;
   }
 
-  // TODO(HA): Handle inProgressOk
   @Override
-  public long getNumberOfTransactions(long fromTxnId, boolean inProgressOk)
+  public void selectInputStreams(Collection<EditLogInputStream> streams,
+      long fromTxId, boolean inProgressOk) {
+    // NOTE: could probably be rewritten more efficiently
+    while (true) {
+      EditLogInputStream elis;
+      try {
+        elis = getInputStream(fromTxId, inProgressOk);
+      } catch (IOException e) {
+        LOG.error(e);
+        return;
+      }
+      if (elis == null) {
+        return;
+      }
+      streams.add(elis);
+      if (elis.getLastTxId() == HdfsConstants.INVALID_TXID) {
+        return;
+      }
+      fromTxId = elis.getLastTxId() + 1;
+    }
+  }
+
+  long getNumberOfTransactions(long fromTxId, boolean inProgressOk)
       throws IOException {
     long count = 0;
     long expectedStart = 0;
-    for (EditLogLedgerMetadata l : getLedgerList()) {
+    for (EditLogLedgerMetadata l : getLedgerList(inProgressOk)) {
+      long lastTxId = l.getLastTxId();
       if (l.isInProgress()) {
-        long endTxId = recoverLastTxId(l);
-        if (endTxId == HdfsConstants.INVALID_TXID) {
+        lastTxId = recoverLastTxId(l, false);
+        if (lastTxId == HdfsConstants.INVALID_TXID) {
           break;
         }
-        count += (endTxId - l.getFirstTxId()) + 1;
-        break;
       }
 
-      if (l.getFirstTxId() < fromTxnId) {
+      assert lastTxId >= l.getFirstTxId();
+
+      if (lastTxId < fromTxId) {
         continue;
-      } else if (l.getFirstTxId() == fromTxnId) {
-        count = (l.getLastTxId() - l.getFirstTxId()) + 1;
-        expectedStart = l.getLastTxId() + 1;
+      } else if (l.getFirstTxId() <= fromTxId && lastTxId >= fromTxId) {
+        // we can start in the middle of a segment
+        count = (lastTxId - l.getFirstTxId()) + 1;
+        expectedStart = lastTxId + 1;
       } else {
         if (expectedStart != l.getFirstTxId()) {
           if (count == 0) {
@@ -362,8 +438,8 @@
             break;
           }
         }
-        count += (l.getLastTxId() - l.getFirstTxId()) + 1;
-        expectedStart = l.getLastTxId() + 1;
+        count += (lastTxId - l.getFirstTxId()) + 1;
+        expectedStart = lastTxId + 1;
       }
     }
     return count;
@@ -371,26 +447,50 @@
 
   @Override
   public void recoverUnfinalizedSegments() throws IOException {
-    wl.acquire();
-
     synchronized (this) {
       try {
-        EditLogLedgerMetadata l
-          = EditLogLedgerMetadata.read(zkc, inprogressZNode());
-        long endTxId = recoverLastTxId(l);
-        if (endTxId == HdfsConstants.INVALID_TXID) {
-          LOG.error("Unrecoverable corruption has occurred in segment "
-                    + l.toString() + " at path " + inprogressZNode()
-                    + ". Unable to continue recovery.");
-          throw new IOException("Unrecoverable corruption, please check logs.");
+        List<String> children = zkc.getChildren(ledgerPath, false);
+        for (String child : children) {
+          if (!child.startsWith(BKJM_EDIT_INPROGRESS)) {
+            continue;
+          }
+          String znode = ledgerPath + "/" + child;
+          EditLogLedgerMetadata l = EditLogLedgerMetadata.read(zkc, znode);
+          try {
+            long endTxId = recoverLastTxId(l, true);
+            if (endTxId == HdfsConstants.INVALID_TXID) {
+              LOG.error("Unrecoverable corruption has occurred in segment "
+                  + l.toString() + " at path " + znode
+                  + ". Unable to continue recovery.");
+              throw new IOException("Unrecoverable corruption,"
+                  + " please check logs.");
+            }
+            finalizeLogSegment(l.getFirstTxId(), endTxId);
+          } catch (SegmentEmptyException see) {
+            LOG.warn("Inprogress znode " + child
+                + " refers to a ledger which is empty. This occurs when the NN"
+                + " crashes after opening a segment, but before writing the"
+                + " OP_START_LOG_SEGMENT op. It is safe to delete."
+                + " MetaData [" + l.toString() + "]");
+
+            // If the max seen transaction is the same as what would
+            // have been the first transaction of the failed ledger,
+            // decrement it, as that transaction never happened and as
+            // such, is _not_ the last seen
+            if (maxTxId.get() == l.getFirstTxId()) {
+              maxTxId.reset(maxTxId.get() - 1);
+            }
+
+            zkc.delete(znode, -1);
+          }
         }
-        finalizeLogSegment(l.getFirstTxId(), endTxId);
       } catch (KeeperException.NoNodeException nne) {
           // nothing to recover, ignore
-      } finally {
-        if (wl.haveLock()) {
-          wl.release();
-        }
+      } catch (KeeperException ke) {
+        throw new IOException("Couldn't get list of inprogress segments", ke);
+      } catch (InterruptedException ie) {
+        throw new IOException("Interrupted getting list of inprogress segments",
+                              ie);
       }
     }
   }
@@ -398,9 +498,8 @@
   @Override
   public void purgeLogsOlderThan(long minTxIdToKeep)
       throws IOException {
-    for (EditLogLedgerMetadata l : getLedgerList()) {
-      if (!l.isInProgress()
-          && l.getLastTxId() < minTxIdToKeep) {
+    for (EditLogLedgerMetadata l : getLedgerList(false)) {
+      if (l.getLastTxId() < minTxIdToKeep) {
         try {
           Stat stat = zkc.exists(l.getZkPath(), false);
           zkc.delete(l.getZkPath(), stat.getVersion());
@@ -421,8 +520,10 @@
     try {
       bkc.close();
       zkc.close();
-    } catch (Exception e) {
-      throw new IOException("Couldn't close zookeeper client", e);
+    } catch (BKException bke) {
+      throw new IOException("Couldn't close bookkeeper client", bke);
+    } catch (InterruptedException ie) {
+      throw new IOException("Interrupted while closing journal manager", ie);
     }
   }
 
@@ -440,14 +541,34 @@
    * Find the id of the last edit log transaction writen to a edit log
    * ledger.
    */
-  private long recoverLastTxId(EditLogLedgerMetadata l) throws IOException {
+  private long recoverLastTxId(EditLogLedgerMetadata l, boolean fence)
+      throws IOException, SegmentEmptyException {
+    LedgerHandle lh = null;
     try {
-      LedgerHandle lh = bkc.openLedger(l.getLedgerId(),
-                                       BookKeeper.DigestType.MAC,
-                                       digestpw.getBytes());
+      if (fence) {
+        lh = bkc.openLedger(l.getLedgerId(),
+                            BookKeeper.DigestType.MAC,
+                            digestpw.getBytes());
+      } else {
+        lh = bkc.openLedgerNoRecovery(l.getLedgerId(),
+                                      BookKeeper.DigestType.MAC,
+                                      digestpw.getBytes());
+      }
+    } catch (BKException bke) {
+      throw new IOException("Exception opening ledger for " + l, bke);
+    } catch (InterruptedException ie) {
+      throw new IOException("Interrupted opening ledger for " + l, ie);
+    }
+
+    BookKeeperEditLogInputStream in = null;
+
+    try {
       long lastAddConfirmed = lh.getLastAddConfirmed();
-      BookKeeperEditLogInputStream in
-        = new BookKeeperEditLogInputStream(lh, l, lastAddConfirmed);
+      if (lastAddConfirmed == -1) {
+        throw new SegmentEmptyException();
+      }
+
+      in = new BookKeeperEditLogInputStream(lh, l, lastAddConfirmed);
 
       long endTxId = HdfsConstants.INVALID_TXID;
       FSEditLogOp op = in.readOp();
@@ -459,25 +580,41 @@
         op = in.readOp();
       }
       return endTxId;
-    } catch (Exception e) {
-      throw new IOException("Exception retreiving last tx id for ledger " + l,
-                            e);
+    } finally {
+      if (in != null) {
+        in.close();
+      }
     }
   }
 
   /**
    * Get a list of all segments in the journal.
    */
-  private List<EditLogLedgerMetadata> getLedgerList() throws IOException {
+  List<EditLogLedgerMetadata> getLedgerList(boolean inProgressOk)
+      throws IOException {
     List<EditLogLedgerMetadata> ledgers
       = new ArrayList<EditLogLedgerMetadata>();
     try {
       List<String> ledgerNames = zkc.getChildren(ledgerPath, false);
-      for (String n : ledgerNames) {
-        ledgers.add(EditLogLedgerMetadata.read(zkc, ledgerPath + "/" + n));
+      for (String ledgerName : ledgerNames) {
+        if (!inProgressOk && ledgerName.contains(BKJM_EDIT_INPROGRESS)) {
+          continue;
+        }
+        String ledgerMetadataPath = ledgerPath + "/" + ledgerName;
+        try {
+          EditLogLedgerMetadata editLogLedgerMetadata = EditLogLedgerMetadata
+              .read(zkc, ledgerMetadataPath);
+          ledgers.add(editLogLedgerMetadata);
+        } catch (KeeperException.NoNodeException e) {
+          LOG.warn("ZNode: " + ledgerMetadataPath
+              + " might have been finalized and deleted."
+              + " So, ignoring NoNodeException.");
+        }
       }
-    } catch (Exception e) {
+    } catch (KeeperException e) {
       throw new IOException("Exception reading ledger list from zk", e);
+    } catch (InterruptedException ie) {
+      throw new IOException("Interrupted getting list of ledgers from zk", ie);
     }
 
     Collections.sort(ledgers, EditLogLedgerMetadata.COMPARATOR);
@@ -495,8 +632,13 @@
   /**
    * Get the znode path for the inprogressZNode
    */
-  String inprogressZNode() {
-    return ledgerPath + "/inprogress";
+  String inprogressZNode(long startTxid) {
+    return ledgerPath + "/inprogress_" + Long.toString(startTxid, 16);
+  }
+
+  @VisibleForTesting
+  void setZooKeeper(ZooKeeper zk) {
+    this.zkc = zk;
   }
 
   /**
@@ -509,4 +651,7 @@
       }
     }
   }
+  
+  private static class SegmentEmptyException extends IOException {
+  }
 }
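
A minimal sketch, not part of this patch, of tuning the ZooKeeper session timeout that BookKeeperJournalManager now reads from configuration; per the javadoc above it should stay below the ZKFC session timeout, and the value chosen here is only illustrative.

    import org.apache.hadoop.conf.Configuration;

    public class BkjmTimeoutSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Property name and default as defined in BookKeeperJournalManager above.
        conf.setInt("dfs.namenode.bookkeeperjournal.zk.session.timeout", 2000);
        int timeoutMs = conf.getInt(
            "dfs.namenode.bookkeeperjournal.zk.session.timeout", 3000);
        System.out.println("BKJM ZooKeeper session timeout (ms): " + timeoutMs);
      }
    }
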
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java
new file mode 100644
index 0000000..910d129
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.contrib.bkjournal;
+
+import java.io.IOException;
+import java.net.InetAddress;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.KeeperException.NodeExistsException;
+import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.data.Stat;
+
+/**
+ * Distributed write permission lock, implemented with ZooKeeper. A caller
+ * reads the CurrentInprogress node to obtain its version number and the
+ * inprogress node path currently recorded there. If such a path exists, the
+ * caller can assume another client is already operating on it and act
+ * accordingly. If none exists, the caller can assume no other client is
+ * active and should then record the path of its own newly created inprogress
+ * node. If any other activity touches the node in the meantime, the version
+ * number changes and the update fails. This read-then-update protocol ensures
+ * that only one client can proceed after checking with CurrentInprogress.
+ */
+
+class CurrentInprogress {
+  private static final String CONTENT_DELIMITER = ",";
+
+  static final Log LOG = LogFactory.getLog(CurrentInprogress.class);
+
+  private final ZooKeeper zkc;
+  private final String currentInprogressNode;
+  private volatile int versionNumberForPermission = -1;
+  private static final int CURRENT_INPROGRESS_LAYOUT_VERSION = -1; 
+  private final String hostName = InetAddress.getLocalHost().toString();
+
+  CurrentInprogress(ZooKeeper zkc, String lockpath) throws IOException {
+    this.currentInprogressNode = lockpath;
+    this.zkc = zkc;
+    try {
+      Stat isCurrentInprogressNodeExists = zkc.exists(lockpath, false);
+      if (isCurrentInprogressNodeExists == null) {
+        try {
+          zkc.create(lockpath, null, Ids.OPEN_ACL_UNSAFE,
+                  CreateMode.PERSISTENT);
+        } catch (NodeExistsException e) {
+          // Node might have been created by another process at the same time. Ignore it.
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(lockpath + " already created by other process.", e);
+          }
+        }
+      }
+    } catch (KeeperException e) {
+      throw new IOException("Exception accessing Zookeeper", e);
+    } catch (InterruptedException ie) {
+      throw new IOException("Interrupted accessing Zookeeper", ie);
+    }
+  }
+
+  /**
+   * Update the CurrentInprogress znode with the given path, prepended with
+   * the layout version number and hostname.
+   * 
+   * @param path
+   *          - to be updated in zookeeper
+   * @throws IOException
+   */
+  void update(String path) throws IOException {
+    String content = CURRENT_INPROGRESS_LAYOUT_VERSION
+        + CONTENT_DELIMITER + hostName + CONTENT_DELIMITER + path;
+    try {
+      zkc.setData(this.currentInprogressNode, content.getBytes(),
+          this.versionNumberForPermission);
+    } catch (KeeperException e) {
+      throw new IOException("Exception when setting the data "
+          + "[layout version number,hostname,inprogressNode path]= [" + content
+          + "] to CurrentInprogress. ", e);
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted while setting the data "
+          + "[layout version number,hostname,inprogressNode path]= [" + content
+          + "] to CurrentInprogress", e);
+    }
+    LOG.info("Updated data[layout version number,hostname,inprogressNode path]"
+        + "= [" + content + "] to CurrentInprogress");
+  }
+
+  /**
+   * Read the CurrentInprogress node data from ZooKeeper and record the znode
+   * version number. Returns the third field of the data, i.e. the path saved
+   * via the #update API.
+   * 
+   * @return available inprogress node path. returns null if not available.
+   * @throws IOException
+   */
+  String read() throws IOException {
+    Stat stat = new Stat();
+    byte[] data = null;
+    try {
+      data = zkc.getData(this.currentInprogressNode, false, stat);
+    } catch (KeeperException e) {
+      throw new IOException("Exception while reading the data from "
+          + currentInprogressNode, e);
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted while reading data from "
+          + currentInprogressNode, e);
+    }
+    this.versionNumberForPermission = stat.getVersion();
+    if (data != null) {
+      String stringData = new String(data);
+      LOG.info("Read data[layout version number,hostname,inprogressNode path]"
+          + "= [" + stringData + "] from CurrentInprogress");
+      String[] contents = stringData.split(CONTENT_DELIMITER);
+      assert contents.length == 3 : "As per the current data format, "
+          + "CurrentInprogress node data should contain 3 fields. "
+          + "i.e layout version number,hostname,inprogressNode path";
+      String layoutVersion = contents[0];
+      if (Long.valueOf(layoutVersion) > CURRENT_INPROGRESS_LAYOUT_VERSION) {
+        throw new IOException(
+            "Supported layout version of CurrentInprogress node is : "
+                + CURRENT_INPROGRESS_LAYOUT_VERSION
+                + " . Layout version of CurrentInprogress node in ZK is : "
+                + layoutVersion);
+      }
+      String inprogressNodePath = contents[2];
+      return inprogressNodePath;
+    } else {
+      LOG.info("No data available in CurrentInprogress");
+    }
+    return null;
+  }
+
+  /** Clear the CurrentInprogress node data */
+  void clear() throws IOException {
+    try {
+      zkc.setData(this.currentInprogressNode, null, versionNumberForPermission);
+    } catch (KeeperException e) {
+      throw new IOException(
+          "Exception when setting the data to CurrentInprogress node", e);
+    } catch (InterruptedException e) {
+      throw new IOException(
+          "Interrupted when setting the data to CurrentInprogress node", e);
+    }
+    LOG.info("Cleared the data from CurrentInprogress");
+  }
+
+}
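
The class added above is an optimistic write-permission check: read() captures the znode version along
with the recorded path, and a later update() only succeeds if nothing modified the node in between. A
minimal usage sketch follows; the journal-manager call sites are not part of this hunk, so the
surrounding names (zkc, ledgerPath, startTxid) and the lock path are assumed for illustration:

    CurrentInprogress ci =
        new CurrentInprogress(zkc, ledgerPath + "/CurrentInprogress"); // lock path is illustrative
    String existing = ci.read();        // also records the znode version used by update()
    if (existing != null) {
      // another writer has (or had) an inprogress segment; recover or fence it first
    } else {
      ci.update(inprogressZNode(startTxid)); // throws IOException if the version changed meanwhile
    }
    // ... once the segment is finalized ...
    ci.clear();                         // clears the record, again guarded by the saved version
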
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
index 9ae5cdd..6c75cd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
@@ -130,8 +130,10 @@
       }
     } catch(KeeperException.NoNodeException nne) {
       throw nne;
-    } catch(Exception e) {
-      throw new IOException("Error reading from zookeeper", e);
+    } catch(KeeperException ke) {
+      throw new IOException("Error reading from zookeeper", ke);
+    } catch (InterruptedException ie) {
+      throw new IOException("Interrupted reading from zookeeper", ie);
     }
   }
     
@@ -147,13 +149,15 @@
           version, ledgerId, firstTxId, lastTxId);
     }
     try {
-      zkc.create(path, finalisedData.getBytes(), 
-                 Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+      zkc.create(path, finalisedData.getBytes(), Ids.OPEN_ACL_UNSAFE,
+          CreateMode.PERSISTENT);
     } catch (KeeperException.NodeExistsException nee) {
       throw nee;
-    } catch (Exception e) {
-      throw new IOException("Error creating ledger znode");
-    } 
+    } catch (KeeperException e) {
+      throw new IOException("Error creating ledger znode", e);
+    } catch (InterruptedException ie) {
+      throw new IOException("Interrupted creating ledger znode", ie);
+    }
   }
   
   boolean verify(ZooKeeper zkc, String path) {
@@ -163,10 +167,13 @@
         LOG.trace("Verifying " + this.toString() 
                   + " against " + other);
       }
-      return other == this;
-    } catch (Exception e) {
+      return other.equals(this);
+    } catch (KeeperException e) {
       LOG.error("Couldn't verify data in " + path, e);
       return false;
+    } catch (IOException ie) {
+      LOG.error("Couldn't verify data in " + path, ie);
+      return false;
     }
   }
   
@@ -181,12 +188,12 @@
       && version == ol.version;
   }
 
- public int hashCode() { 
+  public int hashCode() {
     int hash = 1;
-    hash = hash * 31 + (int)ledgerId;
-    hash = hash * 31 + (int)firstTxId;
-    hash = hash * 31 + (int)lastTxId;
-    hash = hash * 31 + (int)version;
+    hash = hash * 31 + (int) ledgerId;
+    hash = hash * 31 + (int) firstTxId;
+    hash = hash * 31 + (int) lastTxId;
+    hash = hash * 31 + (int) version;
     return hash;
   }
     
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java
index f272409..0109c33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java
@@ -18,13 +18,14 @@
 package org.apache.hadoop.contrib.bkjournal;
 
 import java.io.IOException;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.data.Stat;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.data.Stat;
 
 /**
  * Utility class for storing and reading
@@ -49,18 +50,24 @@
       if (LOG.isTraceEnabled()) {
         LOG.trace("Setting maxTxId to " + maxTxId);
       }
-      String txidStr = Long.toString(maxTxId);
-      try {
-        if (currentStat != null) {
-          currentStat = zkc.setData(path, txidStr.getBytes("UTF-8"), 
-                                    currentStat.getVersion());
-        } else {
-          zkc.create(path, txidStr.getBytes("UTF-8"), 
-                     Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-        }
-      } catch (Exception e) {
-        throw new IOException("Error writing max tx id", e);
+      reset(maxTxId);
+    }
+  }
+
+  synchronized void reset(long maxTxId) throws IOException {
+    String txidStr = Long.toString(maxTxId);
+    try {
+      if (currentStat != null) {
+        currentStat = zkc.setData(path, txidStr.getBytes("UTF-8"), currentStat
+            .getVersion());
+      } else {
+        zkc.create(path, txidStr.getBytes("UTF-8"), Ids.OPEN_ACL_UNSAFE,
+            CreateMode.PERSISTENT);
       }
+    } catch (KeeperException e) {
+      throw new IOException("Error writing max tx id", e);
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted while writing max tx id", e);
     }
   }
 
@@ -74,8 +81,10 @@
         String txidString = new String(bytes, "UTF-8");
         return Long.valueOf(txidString);
       }
-    } catch (Exception e) {
+    } catch (KeeperException e) {
       throw new IOException("Error reading the max tx id from zk", e);
+    } catch (InterruptedException ie) {
+      throw new IOException("Interrupted while reading thr max tx id", ie);
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/WriteLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/WriteLock.java
deleted file mode 100644
index 67743b2..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/WriteLock.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.Watcher.Event.KeeperState;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.ZooDefs.Ids;
-
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.List;
-import java.util.Collections;
-import java.util.Comparator;
-
-import java.net.InetAddress;
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * Distributed lock, using ZooKeeper.
- *
- * The lock is vulnerable to timing issues. For example, the process could
- * encounter a really long GC cycle between acquiring the lock, and writing to
- * a ledger. This could have timed out the lock, and another process could have
- * acquired the lock and started writing to bookkeeper. Therefore other
- * mechanisms are required to ensure correctness (i.e. Fencing).
- */
-class WriteLock implements Watcher {
-  static final Log LOG = LogFactory.getLog(WriteLock.class);
-
-  private final ZooKeeper zkc;
-  private final String lockpath;
-
-  private AtomicInteger lockCount = new AtomicInteger(0);
-  private String myznode = null;
-
-  WriteLock(ZooKeeper zkc, String lockpath) throws IOException {
-    this.lockpath = lockpath;
-
-    this.zkc = zkc;
-    try {
-      if (zkc.exists(lockpath, false) == null) {
-        String localString = InetAddress.getLocalHost().toString();
-        zkc.create(lockpath, localString.getBytes(),
-                   Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-      }
-    } catch (Exception e) {
-      throw new IOException("Exception accessing Zookeeper", e);
-    }
-  }
-
-  void acquire() throws IOException {
-    while (true) {
-      if (lockCount.get() == 0) {
-        try {
-          synchronized(this) {
-            if (lockCount.get() > 0) {
-              lockCount.incrementAndGet();
-              return;
-            }
-            myznode = zkc.create(lockpath + "/lock-", new byte[] {'0'},
-                                 Ids.OPEN_ACL_UNSAFE,
-                                 CreateMode.EPHEMERAL_SEQUENTIAL);
-            if (LOG.isTraceEnabled()) {
-              LOG.trace("Acquiring lock, trying " + myznode);
-            }
-
-            List<String> nodes = zkc.getChildren(lockpath, false);
-            Collections.sort(nodes, new Comparator<String>() {
-                public int compare(String o1,
-                                   String o2) {
-                  Integer l1 = Integer.valueOf(o1.replace("lock-", ""));
-                  Integer l2 = Integer.valueOf(o2.replace("lock-", ""));
-                  return l1 - l2;
-                }
-              });
-            if ((lockpath + "/" + nodes.get(0)).equals(myznode)) {
-              if (LOG.isTraceEnabled()) {
-                LOG.trace("Lock acquired - " + myznode);
-              }
-              lockCount.set(1);
-              zkc.exists(myznode, this);
-              return;
-            } else {
-              LOG.error("Failed to acquire lock with " + myznode
-                        + ", " + nodes.get(0) + " already has it");
-              throw new IOException("Could not acquire lock");
-            }
-          }
-        } catch (KeeperException e) {
-          throw new IOException("Exception accessing Zookeeper", e);
-        } catch (InterruptedException ie) {
-          throw new IOException("Exception accessing Zookeeper", ie);
-        }
-      } else {
-        int ret = lockCount.getAndIncrement();
-        if (ret == 0) {
-          lockCount.decrementAndGet();
-          continue; // try again;
-        } else {
-          return;
-        }
-      }
-    }
-  }
-
-  void release() throws IOException {
-    try {
-      if (lockCount.decrementAndGet() <= 0) {
-        if (lockCount.get() < 0) {
-          LOG.warn("Unbalanced lock handling somewhere, lockCount down to "
-                   + lockCount.get());
-        }
-        synchronized(this) {
-          if (lockCount.get() <= 0) {
-            if (LOG.isTraceEnabled()) {
-              LOG.trace("releasing lock " + myznode);
-            }
-            if (myznode != null) {
-              zkc.delete(myznode, -1);
-              myznode = null;
-            }
-          }
-        }
-      }
-    } catch (Exception e) {
-      throw new IOException("Exception accessing Zookeeper", e);
-    }
-  }
-
-  public void checkWriteLock() throws IOException {
-    if (!haveLock()) {
-      throw new IOException("Lost writer lock");
-    }
-  }
-
-  boolean haveLock() throws IOException {
-    return lockCount.get() > 0;
-  }
-
-  public void process(WatchedEvent event) {
-    if (event.getState() == KeeperState.Disconnected
-        || event.getState() == KeeperState.Expired) {
-      LOG.warn("Lost zookeeper session, lost lock ");
-      lockCount.set(0);
-    } else {
-      // reapply the watch
-      synchronized (this) {
-        LOG.info("Zookeeper event " + event
-                 + " received, reapplying watch to " + myznode);
-        if (myznode != null) {
-          try {
-            zkc.exists(myznode, this);
-          } catch (Exception e) {
-            LOG.warn("Could not set watch on lock, releasing", e);
-            try {
-              release();
-            } catch (IOException ioe) {
-              LOG.error("Could not release Zk lock", ioe);
-            }
-          }
-        }
-      }
-    }
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java
new file mode 100644
index 0000000..32b0583
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.contrib.bkjournal;
+
+import static org.junit.Assert.*;
+
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.KeeperException;
+
+import org.apache.bookkeeper.proto.BookieServer;
+import org.apache.bookkeeper.conf.ServerConfiguration;
+import org.apache.bookkeeper.util.LocalBookKeeper;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.List;
+
+import java.io.IOException;
+import java.io.File;
+
+/**
+ * Utility class for setting up bookkeeper ensembles
+ * and bringing individual bookies up and down
+ */
+class BKJMUtil {
+  protected static final Log LOG = LogFactory.getLog(BKJMUtil.class);
+
+  int nextPort = 6000; // next port for additionally created bookies
+  private Thread bkthread = null;
+  private final static String zkEnsemble = "127.0.0.1:2181";
+  int numBookies;
+
+  BKJMUtil(final int numBookies) throws Exception {
+    this.numBookies = numBookies;
+
+    bkthread = new Thread() {
+        public void run() {
+          try {
+            String[] args = new String[1];
+            args[0] = String.valueOf(numBookies);
+            LOG.info("Starting bk");
+            LocalBookKeeper.main(args);
+          } catch (InterruptedException e) {
+            // go away quietly
+          } catch (Exception e) {
+            LOG.error("Error starting local bk", e);
+          }
+        }
+      };
+  }
+
+  void start() throws Exception {
+    bkthread.start();
+    if (!LocalBookKeeper.waitForServerUp(zkEnsemble, 10000)) {
+      throw new Exception("Error starting zookeeper/bookkeeper");
+    }
+    assertEquals("Not all bookies started",
+                 numBookies, checkBookiesUp(numBookies, 10));
+  }
+
+  void teardown() throws Exception {
+    if (bkthread != null) {
+      bkthread.interrupt();
+      bkthread.join();
+    }
+  }
+
+  static ZooKeeper connectZooKeeper()
+      throws IOException, KeeperException, InterruptedException {
+    final CountDownLatch latch = new CountDownLatch(1);
+
+    ZooKeeper zkc = new ZooKeeper(zkEnsemble, 3600, new Watcher() {
+        public void process(WatchedEvent event) {
+          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
+            latch.countDown();
+          }
+        }
+      });
+    if (!latch.await(3, TimeUnit.SECONDS)) {
+      throw new IOException("Zookeeper took too long to connect");
+    }
+    return zkc;
+  }
+
+  static URI createJournalURI(String path) throws Exception {
+    return URI.create("bookkeeper://" + zkEnsemble + path);
+  }
+
+  static void addJournalManagerDefinition(Configuration conf) {
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".bookkeeper",
+             "org.apache.hadoop.contrib.bkjournal.BookKeeperJournalManager");
+  }
+
+  BookieServer newBookie() throws Exception {
+    int port = nextPort++;
+    ServerConfiguration bookieConf = new ServerConfiguration();
+    bookieConf.setBookiePort(port);
+    File tmpdir = File.createTempFile("bookie" + Integer.toString(port) + "_",
+                                      "test");
+    tmpdir.delete();
+    tmpdir.mkdir();
+
+    bookieConf.setZkServers(zkEnsemble);
+    bookieConf.setJournalDirName(tmpdir.getPath());
+    bookieConf.setLedgerDirNames(new String[] { tmpdir.getPath() });
+
+    BookieServer b = new BookieServer(bookieConf);
+    b.start();
+    for (int i = 0; i < 10 && !b.isRunning(); i++) {
+      Thread.sleep(10000);
+    }
+    if (!b.isRunning()) {
+      throw new IOException("Bookie would not start");
+    }
+    return b;
+  }
+
+  /**
+   * Check that a number of bookies are available
+   * @param count number of bookies required
+   * @param timeout number of seconds to wait for bookies to start
+   * @return the number of bookies seen up once count is reached or the timeout expires
+   */
+  int checkBookiesUp(int count, int timeout) throws Exception {
+    ZooKeeper zkc = connectZooKeeper();
+    try {
+      boolean up = false;
+      int mostRecentSize = 0;
+      for (int i = 0; i < timeout; i++) {
+        try {
+          List<String> children = zkc.getChildren("/ledgers/available",
+                                                  false);
+          mostRecentSize = children.size();
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Found " + mostRecentSize + " bookies up, "
+                      + "waiting for " + count);
+            if (LOG.isTraceEnabled()) {
+              for (String child : children) {
+                LOG.trace(" server: " + child);
+              }
+            }
+          }
+          if (mostRecentSize == count) {
+            up = true;
+            break;
+          }
+        } catch (KeeperException e) {
+          // ignore
+        }
+        Thread.sleep(1000);
+      }
+      return mostRecentSize;
+    } finally {
+      zkc.close();
+    }
+  }
+}
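
For orientation, the tests that follow drive this helper in roughly the same way; a condensed sketch of
the pattern (the journal path and bookie count here are arbitrary, not taken from any single test):

    BKJMUtil bkutil = new BKJMUtil(3);                 // local ZooKeeper + 3 bookies
    bkutil.start();
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
             BKJMUtil.createJournalURI("/example-journal").toString());
    BKJMUtil.addJournalManagerDefinition(conf);        // registers the bkjournal plugin class
    BookieServer extra = bkutil.newBookie();           // grow the ensemble when a test needs it
    // ... run the scenario ...
    extra.shutdown();
    bkutil.teardown();
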
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
new file mode 100644
index 0000000..7f97a6d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
@@ -0,0 +1,262 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.contrib.bkjournal;
+
+import static org.junit.Assert.*;
+import org.junit.Test;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+
+import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+
+import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
+
+import org.apache.hadoop.ipc.RemoteException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import org.apache.bookkeeper.proto.BookieServer;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import java.io.IOException;
+
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Integration test to ensure that the BookKeeper JournalManager
+ * works for HDFS Namenode HA
+ */
+public class TestBookKeeperAsHASharedDir {
+  static final Log LOG = LogFactory.getLog(TestBookKeeperAsHASharedDir.class);
+
+  private static BKJMUtil bkutil;
+  static int numBookies = 3;
+
+  private static final String TEST_FILE_DATA = "HA BookKeeperJournalManager";
+
+  @BeforeClass
+  public static void setupBookkeeper() throws Exception {
+    bkutil = new BKJMUtil(numBookies);
+    bkutil.start();
+  }
+
+  @AfterClass
+  public static void teardownBookkeeper() throws Exception {
+    bkutil.teardown();
+  }
+
+  /**
+   * Test simple HA failover usecase with BK
+   */
+  @Test
+  public void testFailoverWithBK() throws Exception {
+    Runtime mockRuntime1 = mock(Runtime.class);
+    Runtime mockRuntime2 = mock(Runtime.class);
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
+               BKJMUtil.createJournalURI("/hotfailover").toString());
+      BKJMUtil.addJournalManagerDefinition(conf);
+
+      cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(0)
+        .manageNameDfsSharedDirs(false)
+        .build();
+      NameNode nn1 = cluster.getNameNode(0);
+      NameNode nn2 = cluster.getNameNode(1);
+      FSEditLogTestUtil.setRuntimeForEditLog(nn1, mockRuntime1);
+      FSEditLogTestUtil.setRuntimeForEditLog(nn2, mockRuntime2);
+
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+
+      Path p = new Path("/testBKJMfailover");
+
+      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+      fs.mkdirs(p);
+      cluster.shutdownNameNode(0);
+
+      cluster.transitionToActive(1);
+
+      assertTrue(fs.exists(p));
+    } finally {
+      verify(mockRuntime1, times(0)).exit(anyInt());
+      verify(mockRuntime2, times(0)).exit(anyInt());
+
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test HA failover, where BK, as the shared storage, fails.
+   * Once it becomes available again, a standby can come up.
+   * Verify that any write happening after the BK fail is not
+   * available on the standby.
+   */
+  @Test
+  public void testFailoverWithFailingBKCluster() throws Exception {
+    int ensembleSize = numBookies + 1;
+    BookieServer newBookie = bkutil.newBookie();
+    assertEquals("New bookie didn't start",
+                 ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
+
+    BookieServer replacementBookie = null;
+
+    Runtime mockRuntime1 = mock(Runtime.class);
+    Runtime mockRuntime2 = mock(Runtime.class);
+
+    MiniDFSCluster cluster = null;
+
+    try {
+      Configuration conf = new Configuration();
+      conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
+               BKJMUtil.createJournalURI("/hotfailoverWithFail").toString());
+      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
+                  ensembleSize);
+      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
+                  ensembleSize);
+      BKJMUtil.addJournalManagerDefinition(conf);
+
+      cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(0)
+        .manageNameDfsSharedDirs(false)
+        .build();
+      NameNode nn1 = cluster.getNameNode(0);
+      NameNode nn2 = cluster.getNameNode(1);
+      FSEditLogTestUtil.setRuntimeForEditLog(nn1, mockRuntime1);
+      FSEditLogTestUtil.setRuntimeForEditLog(nn2, mockRuntime2);
+
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+
+      Path p1 = new Path("/testBKJMFailingBKCluster1");
+      Path p2 = new Path("/testBKJMFailingBKCluster2");
+
+      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+      fs.mkdirs(p1);
+      newBookie.shutdown(); // will take down shared storage
+      assertEquals("New bookie didn't stop",
+                   numBookies, bkutil.checkBookiesUp(numBookies, 10));
+
+      // mkdirs will "succeed", but nn have called runtime.exit
+      fs.mkdirs(p2);
+      verify(mockRuntime1, atLeastOnce()).exit(anyInt());
+      verify(mockRuntime2, times(0)).exit(anyInt());
+      cluster.shutdownNameNode(0);
+
+      try {
+        cluster.transitionToActive(1);
+        fail("Shouldn't have been able to transition with bookies down");
+      } catch (ServiceFailedException e) {
+        assertTrue("Wrong exception",
+            e.getMessage().contains("Failed to start active services"));
+      }
+      verify(mockRuntime2, atLeastOnce()).exit(anyInt());
+
+      replacementBookie = bkutil.newBookie();
+      assertEquals("Replacement bookie didn't start",
+                   ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
+      cluster.transitionToActive(1); // should work fine now
+
+      assertTrue(fs.exists(p1));
+      assertFalse(fs.exists(p2));
+    } finally {
+      newBookie.shutdown();
+      if (replacementBookie != null) {
+        replacementBookie.shutdown();
+      }
+
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test that two namenodes can't continue as primary
+   */
+  @Test
+  public void testMultiplePrimariesStarted() throws Exception {
+    Runtime mockRuntime1 = mock(Runtime.class);
+    Runtime mockRuntime2 = mock(Runtime.class);
+    Path p1 = new Path("/testBKJMMultiplePrimary");
+
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
+               BKJMUtil.createJournalURI("/hotfailoverMultiple").toString());
+      BKJMUtil.addJournalManagerDefinition(conf);
+
+      cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(0)
+        .manageNameDfsSharedDirs(false)
+        .build();
+      NameNode nn1 = cluster.getNameNode(0);
+      NameNode nn2 = cluster.getNameNode(1);
+      FSEditLogTestUtil.setRuntimeForEditLog(nn1, mockRuntime1);
+      FSEditLogTestUtil.setRuntimeForEditLog(nn2, mockRuntime2);
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+
+      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
+      fs.mkdirs(p1);
+      nn1.getRpcServer().rollEditLog();
+      cluster.transitionToActive(1);
+      fs = cluster.getFileSystem(0); // get the older active server.
+      // This edit log update on the older active should make the older active
+      // shut down.
+      fs.delete(p1, true);
+      verify(mockRuntime1, atLeastOnce()).exit(anyInt());
+      verify(mockRuntime2, times(0)).exit(anyInt());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
new file mode 100644
index 0000000..3710676
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.contrib.bkjournal;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.bookkeeper.client.BookKeeper;
+import org.apache.bookkeeper.client.LedgerHandle;
+import org.apache.bookkeeper.conf.ClientConfiguration;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.zookeeper.ZooKeeper;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Unit test for the bkjm's streams
+ */
+public class TestBookKeeperEditLogStreams {
+  static final Log LOG = LogFactory.getLog(TestBookKeeperEditLogStreams.class);
+
+  private static BKJMUtil bkutil;
+  private final static int numBookies = 3;
+
+  @BeforeClass
+  public static void setupBookkeeper() throws Exception {
+    bkutil = new BKJMUtil(numBookies);
+    bkutil.start();
+  }
+
+  @AfterClass
+  public static void teardownBookkeeper() throws Exception {
+    bkutil.teardown();
+  }
+
+  /**
+   * Test that bkjm will refuse to open a stream on an empty
+   * ledger.
+   */
+  @Test
+  public void testEmptyInputStream() throws Exception {
+    ZooKeeper zk = BKJMUtil.connectZooKeeper();
+
+    BookKeeper bkc = new BookKeeper(new ClientConfiguration(), zk);
+    try {
+      LedgerHandle lh = bkc.createLedger(BookKeeper.DigestType.CRC32, "foobar"
+          .getBytes());
+      lh.close();
+
+      EditLogLedgerMetadata metadata = new EditLogLedgerMetadata("/foobar",
+          HdfsConstants.LAYOUT_VERSION, lh.getId(), 0x1234);
+      try {
+        new BookKeeperEditLogInputStream(lh, metadata, -1);
+        fail("Shouldn't get this far, should have thrown");
+      } catch (IOException ioe) {
+        assertTrue(ioe.getMessage().contains("Invalid first bk entry to read"));
+      }
+
+      metadata = new EditLogLedgerMetadata("/foobar",
+          HdfsConstants.LAYOUT_VERSION, lh.getId(), 0x1234);
+      try {
+        new BookKeeperEditLogInputStream(lh, metadata, 0);
+        fail("Shouldn't get this far, should have thrown");
+      } catch (IOException ioe) {
+        assertTrue(ioe.getMessage().contains("Invalid first bk entry to read"));
+      }
+    } finally {
+      bkc.close();
+      zk.close();
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
new file mode 100644
index 0000000..cb7ba3f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.contrib.bkjournal;
+
+import org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+
+import org.junit.Before;
+import org.junit.After;
+
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+
+/**
+ * Runs the same tests as TestStandbyCheckpoints, but
+ * using a bookkeeper journal manager as the shared directory
+ */
+public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
+  private static BKJMUtil bkutil = null;
+  static int numBookies = 3;
+  static int journalCount = 0;
+
+  @Override
+  @Before
+  public void setupCluster() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
+             BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
+             .toString());
+    BKJMUtil.addJournalManagerDefinition(conf);
+
+    MiniDFSNNTopology topology = new MiniDFSNNTopology()
+      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+
+    cluster = new MiniDFSCluster.Builder(conf)
+      .nnTopology(topology)
+      .numDataNodes(0)
+      .manageNameDfsSharedDirs(false)
+      .build();
+    cluster.waitActive();
+
+    nn0 = cluster.getNameNode(0);
+    nn1 = cluster.getNameNode(1);
+    fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+    cluster.transitionToActive(0);
+  }
+
+  @BeforeClass
+  public static void startBK() throws Exception {
+    journalCount = 0;
+    bkutil = new BKJMUtil(numBookies);
+    bkutil.start();
+  }
+
+  @AfterClass
+  public static void shutdownBK() throws Exception {
+    if (bkutil != null) {
+      bkutil.teardown();
+    }
+  }
+
+  @Override
+  public void testCheckpointCancellation() throws Exception {
+    // Overridden as the implementation in the superclass assumes that writes
+    // are to a file. This should be fixed at some point
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
index 5937fa8..9476dea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
@@ -18,53 +18,31 @@
 package org.apache.hadoop.contrib.bkjournal;
 
 import static org.junit.Assert.*;
-
-import java.net.URI;
-import java.util.Collections;
-import java.util.Arrays;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.bookkeeper.util.LocalBookKeeper;
-
-import java.io.RandomAccessFile;
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.BufferedInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.security.SecurityUtil;
+import static org.mockito.Mockito.spy;
 import org.junit.Test;
 import org.junit.Before;
 import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.AfterClass;
+import org.mockito.Mockito;
 
-import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
+import java.io.IOException;
+import java.net.URI;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager;
 
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.WatchedEvent;
+import org.apache.bookkeeper.proto.BookieServer;
+import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
-
-import com.google.common.collect.ImmutableList;
-
-import java.util.zip.CheckedInputStream;
-import java.util.zip.Checksum;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.ZooDefs.Ids;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -73,79 +51,26 @@
   static final Log LOG = LogFactory.getLog(TestBookKeeperJournalManager.class);
   
   private static final long DEFAULT_SEGMENT_SIZE = 1000;
-  private static final String zkEnsemble = "localhost:2181";
 
-  private static Thread bkthread;
   protected static Configuration conf = new Configuration();
   private ZooKeeper zkc;
-
-  private static ZooKeeper connectZooKeeper(String ensemble) 
-      throws IOException, KeeperException, InterruptedException {
-    final CountDownLatch latch = new CountDownLatch(1);
-        
-    ZooKeeper zkc = new ZooKeeper(zkEnsemble, 3600, new Watcher() {
-        public void process(WatchedEvent event) {
-          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
-            latch.countDown();
-          }
-        }
-      });
-    if (!latch.await(3, TimeUnit.SECONDS)) {
-      throw new IOException("Zookeeper took too long to connect");
-    }
-    return zkc;
-  }
+  private static BKJMUtil bkutil;
+  static int numBookies = 3;
 
   @BeforeClass
   public static void setupBookkeeper() throws Exception {
-    final int numBookies = 5;
-    bkthread = new Thread() {
-        public void run() {
-          try {
-            String[] args = new String[1];
-            args[0] = String.valueOf(numBookies);
-            LOG.info("Starting bk");
-            LocalBookKeeper.main(args);
-          } catch (InterruptedException e) {
-            // go away quietly
-          } catch (Exception e) {
-            LOG.error("Error starting local bk", e);
-          }
-        }
-      };
-    bkthread.start();
-    
-    if (!LocalBookKeeper.waitForServerUp(zkEnsemble, 10000)) {
-      throw new Exception("Error starting zookeeper/bookkeeper");
-    }
-
-    ZooKeeper zkc = connectZooKeeper(zkEnsemble);
-    try {
-      boolean up = false;
-      for (int i = 0; i < 10; i++) {
-        try {
-          List<String> children = zkc.getChildren("/ledgers/available", 
-                                                  false);
-          if (children.size() == numBookies) {
-            up = true;
-            break;
-          }
-        } catch (KeeperException e) {
-          // ignore
-        }
-        Thread.sleep(1000);
-      }
-      if (!up) {
-        throw new IOException("Not enough bookies started");
-      }
-    } finally {
-      zkc.close();
-    }
+    bkutil = new BKJMUtil(numBookies);
+    bkutil.start();
   }
-  
+
+  @AfterClass
+  public static void teardownBookkeeper() throws Exception {
+    bkutil.teardown();
+  }
+
   @Before
   public void setup() throws Exception {
-    zkc = connectZooKeeper(zkEnsemble);
+    zkc = BKJMUtil.connectZooKeeper();
   }
 
   @After
@@ -153,19 +78,10 @@
     zkc.close();
   }
 
-  @AfterClass
-  public static void teardownBookkeeper() throws Exception {
-    if (bkthread != null) {
-      bkthread.interrupt();
-      bkthread.join();
-    }
-  }
-
   @Test
   public void testSimpleWrite() throws Exception {
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-        URI.create("bookkeeper://" + zkEnsemble + "/hdfsjournal-simplewrite"));
-    long txid = 1;
+        BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"));
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1 ; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
@@ -178,14 +94,13 @@
     String zkpath = bkjm.finalizedLedgerZNode(1, 100);
     
     assertNotNull(zkc.exists(zkpath, false));
-    assertNull(zkc.exists(bkjm.inprogressZNode(), false));
+    assertNull(zkc.exists(bkjm.inprogressZNode(1), false));
   }
 
   @Test
   public void testNumberOfTransactions() throws Exception {
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, 
-        URI.create("bookkeeper://" + zkEnsemble + "/hdfsjournal-txncount"));
-    long txid = 1;
+    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
+        BKJMUtil.createJournalURI("/hdfsjournal-txncount"));
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1 ; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
@@ -201,8 +116,8 @@
 
   @Test 
   public void testNumberOfTransactionsWithGaps() throws Exception {
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, 
-        URI.create("bookkeeper://" + zkEnsemble + "/hdfsjournal-gaps"));
+    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
+        BKJMUtil.createJournalURI("/hdfsjournal-gaps"));
     long txid = 1;
     for (long i = 0; i < 3; i++) {
       long start = txid;
@@ -214,9 +129,11 @@
       }
       out.close();
       bkjm.finalizeLogSegment(start, txid-1);
-      assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(start, txid-1), false));
+      assertNotNull(
+          zkc.exists(bkjm.finalizedLedgerZNode(start, txid-1), false));
     }
-    zkc.delete(bkjm.finalizedLedgerZNode(DEFAULT_SEGMENT_SIZE+1, DEFAULT_SEGMENT_SIZE*2), -1);
+    zkc.delete(bkjm.finalizedLedgerZNode(DEFAULT_SEGMENT_SIZE+1,
+                                         DEFAULT_SEGMENT_SIZE*2), -1);
     
     long numTrans = bkjm.getNumberOfTransactions(1, true);
     assertEquals(DEFAULT_SEGMENT_SIZE, numTrans);
@@ -234,8 +151,8 @@
 
   @Test
   public void testNumberOfTransactionsWithInprogressAtEnd() throws Exception {
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, 
-        URI.create("bookkeeper://" + zkEnsemble + "/hdfsjournal-inprogressAtEnd"));
+    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
+        BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"));
     long txid = 1;
     for (long i = 0; i < 3; i++) {
       long start = txid;
@@ -248,7 +165,8 @@
       
       out.close();
       bkjm.finalizeLogSegment(start, (txid-1));
-      assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(start, (txid-1)), false));
+      assertNotNull(
+          zkc.exists(bkjm.finalizedLedgerZNode(start, (txid-1)), false));
     }
     long start = txid;
     EditLogOutputStream out = bkjm.startLogSegment(start);
@@ -272,8 +190,8 @@
    */
   @Test
   public void testWriteRestartFrom1() throws Exception {
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, 
-        URI.create("bookkeeper://" + zkEnsemble + "/hdfsjournal-restartFrom1"));
+    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
+        BKJMUtil.createJournalURI("/hdfsjournal-restartFrom1"));
     long txid = 1;
     long start = txid;
     EditLogOutputStream out = bkjm.startLogSegment(txid);
@@ -327,25 +245,26 @@
   @Test
   public void testTwoWriters() throws Exception {
     long start = 1;
-    BookKeeperJournalManager bkjm1 = new BookKeeperJournalManager(conf, 
-        URI.create("bookkeeper://" + zkEnsemble + "/hdfsjournal-dualWriter"));
-    BookKeeperJournalManager bkjm2 = new BookKeeperJournalManager(conf, 
-        URI.create("bookkeeper://" + zkEnsemble + "/hdfsjournal-dualWriter"));
+    BookKeeperJournalManager bkjm1 = new BookKeeperJournalManager(conf,
+        BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"));
+    BookKeeperJournalManager bkjm2 = new BookKeeperJournalManager(conf,
+        BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"));
     
     EditLogOutputStream out1 = bkjm1.startLogSegment(start);
     try {
-      EditLogOutputStream out2 = bkjm2.startLogSegment(start);
+      bkjm2.startLogSegment(start);
       fail("Shouldn't have been able to open the second writer");
     } catch (IOException ioe) {
       LOG.info("Caught exception as expected", ioe);
+    } finally {
+      out1.close();
     }
   }
 
   @Test
   public void testSimpleRead() throws Exception {
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, 
-        URI.create("bookkeeper://" + zkEnsemble + "/hdfsjournal-simpleread"));
-    long txid = 1;
+    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
+        BKJMUtil.createJournalURI("/hdfsjournal-simpleread"));
     final long numTransactions = 10000;
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1 ; i <= numTransactions; i++) {
@@ -368,10 +287,9 @@
 
   @Test
   public void testSimpleRecovery() throws Exception {
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, 
-        URI.create("bookkeeper://" + zkEnsemble + "/hdfsjournal-simplerecovery"));
+    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
+        BKJMUtil.createJournalURI("/hdfsjournal-simplerecovery"));
     EditLogOutputStream out = bkjm.startLogSegment(1);
-    long txid = 1;
     for (long i = 1 ; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(i);
@@ -385,11 +303,372 @@
 
 
     assertNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
-    assertNotNull(zkc.exists(bkjm.inprogressZNode(), false));
+    assertNotNull(zkc.exists(bkjm.inprogressZNode(1), false));
 
     bkjm.recoverUnfinalizedSegments();
 
     assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
-    assertNull(zkc.exists(bkjm.inprogressZNode(), false));
+    assertNull(zkc.exists(bkjm.inprogressZNode(1), false));
+  }
+
+  /**
+   * Test that if enough bookies fail to prevent an ensemble from forming,
+   * writes to bookkeeper will fail. Also test that once an ensemble is
+   * available again, writing can continue.
+   */
+  @Test
+  public void testAllBookieFailure() throws Exception {
+    BookieServer bookieToFail = bkutil.newBookie();
+    BookieServer replacementBookie = null;
+
+    try {
+      int ensembleSize = numBookies + 1;
+      assertEquals("New bookie didn't start",
+                   ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
+
+      // ensure that the journal manager has to use all bookies,
+      // so that a failure will fail the journal manager
+      Configuration conf = new Configuration();
+      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
+                  ensembleSize);
+      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
+                  ensembleSize);
+      long txid = 1;
+      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
+          BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"));
+      EditLogOutputStream out = bkjm.startLogSegment(txid);
+
+      for (long i = 1 ; i <= 3; i++) {
+        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
+        op.setTransactionId(txid++);
+        out.write(op);
+      }
+      out.setReadyToFlush();
+      out.flush();
+      bookieToFail.shutdown();
+      assertEquals("New bookie didn't die",
+                   numBookies, bkutil.checkBookiesUp(numBookies, 10));
+
+      try {
+        for (long i = 1 ; i <= 3; i++) {
+          FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
+          op.setTransactionId(txid++);
+          out.write(op);
+        }
+        out.setReadyToFlush();
+        out.flush();
+        fail("should not get to this stage");
+      } catch (IOException ioe) {
+        LOG.debug("Error writing to bookkeeper", ioe);
+        assertTrue("Invalid exception message",
+                   ioe.getMessage().contains("Failed to write to bookkeeper"));
+      }
+      replacementBookie = bkutil.newBookie();
+
+      assertEquals("New bookie didn't start",
+                   numBookies+1, bkutil.checkBookiesUp(numBookies+1, 10));
+      bkjm.recoverUnfinalizedSegments();
+      out = bkjm.startLogSegment(txid);
+      for (long i = 1 ; i <= 3; i++) {
+        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
+        op.setTransactionId(txid++);
+        out.write(op);
+      }
+
+      out.setReadyToFlush();
+      out.flush();
+
+    } catch (Exception e) {
+      LOG.error("Exception in test", e);
+      throw e;
+    } finally {
+      if (replacementBookie != null) {
+        replacementBookie.shutdown();
+      }
+      bookieToFail.shutdown();
+
+      if (bkutil.checkBookiesUp(numBookies, 30) != numBookies) {
+        LOG.warn("Not all bookies from this test shut down, expect errors");
+      }
+    }
+  }
+
+  /**
+   * Test that a BookKeeper JM can continue to work across the
+   * failure of a bookie. This should be handled transparently
+   * by bookkeeper.
+   */
+  @Test
+  public void testOneBookieFailure() throws Exception {
+    BookieServer bookieToFail = bkutil.newBookie();
+    BookieServer replacementBookie = null;
+
+    try {
+      int ensembleSize = numBookies + 1;
+      assertEquals("New bookie didn't start",
+                   ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
+
+      // ensure that the journal manager has to use all bookies,
+      // so that a failure will fail the journal manager
+      Configuration conf = new Configuration();
+      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
+                  ensembleSize);
+      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
+                  ensembleSize);
+      long txid = 1;
+      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
+          BKJMUtil.createJournalURI("/hdfsjournal-onebookiefailure"));
+      EditLogOutputStream out = bkjm.startLogSegment(txid);
+      for (long i = 1 ; i <= 3; i++) {
+        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
+        op.setTransactionId(txid++);
+        out.write(op);
+      }
+      out.setReadyToFlush();
+      out.flush();
+
+      replacementBookie = bkutil.newBookie();
+      assertEquals("replacement bookie didn't start",
+                   ensembleSize+1, bkutil.checkBookiesUp(ensembleSize+1, 10));
+      bookieToFail.shutdown();
+      assertEquals("New bookie didn't die",
+                   ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
+
+      for (long i = 1 ; i <= 3; i++) {
+        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
+        op.setTransactionId(txid++);
+        out.write(op);
+      }
+      out.setReadyToFlush();
+      out.flush();
+    } catch (Exception e) {
+      LOG.error("Exception in test", e);
+      throw e;
+    } finally {
+      if (replacementBookie != null) {
+        replacementBookie.shutdown();
+      }
+      bookieToFail.shutdown();
+
+      if (bkutil.checkBookiesUp(numBookies, 30) != numBookies) {
+        LOG.warn("Not all bookies from this test shut down, expect errors");
+      }
+    }
+  }
+  
+  /**
+   * If a journal manager has an empty inprogress node, ensure that we throw an
+   * error, as this should not be possible; it indicates that some third party
+   * has corrupted the ZooKeeper state.
+   */
+  @Test
+  public void testEmptyInprogressNode() throws Exception {
+    URI uri = BKJMUtil.createJournalURI("/hdfsjournal-emptyInprogress");
+    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri);
+
+    EditLogOutputStream out = bkjm.startLogSegment(1);
+    for (long i = 1; i <= 100; i++) {
+      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
+      op.setTransactionId(i);
+      out.write(op);
+    }
+    out.close();
+    bkjm.finalizeLogSegment(1, 100);
+
+    out = bkjm.startLogSegment(101);
+    out.close();
+    bkjm.close();
+    String inprogressZNode = bkjm.inprogressZNode(101);
+    zkc.setData(inprogressZNode, new byte[0], -1);
+
+    bkjm = new BookKeeperJournalManager(conf, uri);
+    try {
+      bkjm.recoverUnfinalizedSegments();
+      fail("Should have failed. There should be no way of creating"
+          + " an empty inprogress znode");
+    } catch (IOException e) {
+      // correct behaviour
+      assertTrue("Exception different than expected", e.getMessage().contains(
+          "Invalid ledger entry,"));
+    } finally {
+      bkjm.close();
+    }
+  }
+
+  /**
+   * If a journal manager has a corrupt inprogress node, ensure that we throw
+   * an error, as this should not be possible; it indicates that some third
+   * party has corrupted the ZooKeeper state.
+   */
+  @Test
+  public void testCorruptInprogressNode() throws Exception {
+    URI uri = BKJMUtil.createJournalURI("/hdfsjournal-corruptInprogress");
+    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri);
+
+    EditLogOutputStream out = bkjm.startLogSegment(1);
+    for (long i = 1; i <= 100; i++) {
+      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
+      op.setTransactionId(i);
+      out.write(op);
+    }
+    out.close();
+    bkjm.finalizeLogSegment(1, 100);
+
+    out = bkjm.startLogSegment(101);
+    out.close();
+    bkjm.close();
+
+    String inprogressZNode = bkjm.inprogressZNode(101);
+    zkc.setData(inprogressZNode, "WholeLottaJunk".getBytes(), -1);
+
+    bkjm = new BookKeeperJournalManager(conf, uri);
+    try {
+      bkjm.recoverUnfinalizedSegments();
+      fail("Should have failed. There should be no way of creating"
+          + " a corrupt inprogress znode");
+    } catch (IOException e) {
+      // correct behaviour
+      assertTrue("Exception different than expected", e.getMessage().contains(
+          "Invalid ledger entry,"));
+
+    } finally {
+      bkjm.close();
+    }
+  }
+
+  /**
+   * Cases can occur where we create a segment but crash before we even have the
+   * chance to write the START_SEGMENT op. If this occurs we should warn, but
+   * load as normal
+   */
+  @Test
+  public void testEmptyInprogressLedger() throws Exception {
+    URI uri = BKJMUtil.createJournalURI("/hdfsjournal-emptyInprogressLedger");
+    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri);
+
+    EditLogOutputStream out = bkjm.startLogSegment(1);
+    for (long i = 1; i <= 100; i++) {
+      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
+      op.setTransactionId(i);
+      out.write(op);
+    }
+    out.close();
+    bkjm.finalizeLogSegment(1, 100);
+
+    out = bkjm.startLogSegment(101);
+    out.close();
+    bkjm.close();
+
+    bkjm = new BookKeeperJournalManager(conf, uri);
+    bkjm.recoverUnfinalizedSegments();
+    out = bkjm.startLogSegment(101);
+    for (long i = 1; i <= 100; i++) {
+      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
+      op.setTransactionId(i);
+      out.write(op);
+    }
+    out.close();
+    bkjm.finalizeLogSegment(101, 200);
+
+    bkjm.close();
+  }
+
+  /**
+   * Test the case where we fail after finalizing an inprogress segment but
+   * before deleting the corresponding inprogress znode.
+   */
+  @Test
+  public void testRefinalizeAlreadyFinalizedInprogress() throws Exception {
+    URI uri = BKJMUtil
+        .createJournalURI("/hdfsjournal-refinalizeInprogressLedger");
+    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri);
+
+    EditLogOutputStream out = bkjm.startLogSegment(1);
+    for (long i = 1; i <= 100; i++) {
+      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
+      op.setTransactionId(i);
+      out.write(op);
+    }
+    out.close();
+    bkjm.close();
+
+    String inprogressZNode = bkjm.inprogressZNode(1);
+    String finalizedZNode = bkjm.finalizedLedgerZNode(1, 100);
+    assertNotNull("inprogress znode doesn't exist", zkc.exists(inprogressZNode,
+        null));
+    assertNull("finalized znode exists", zkc.exists(finalizedZNode, null));
+
+    byte[] inprogressData = zkc.getData(inprogressZNode, false, null);
+
+    // finalize
+    bkjm = new BookKeeperJournalManager(conf, uri);
+    bkjm.recoverUnfinalizedSegments();
+    bkjm.close();
+
+    assertNull("inprogress znode exists", zkc.exists(inprogressZNode, null));
+    assertNotNull("finalized znode doesn't exist", zkc.exists(finalizedZNode,
+        null));
+
+    zkc.create(inprogressZNode, inprogressData, Ids.OPEN_ACL_UNSAFE,
+        CreateMode.PERSISTENT);
+
+    // should work fine
+    bkjm = new BookKeeperJournalManager(conf, uri);
+    bkjm.recoverUnfinalizedSegments();
+    bkjm.close();
+  }
+
+  /**
+   * Tests that reading edit log metadata from ZooKeeper handles a missing
+   * znode: bkjm.getInputStream(fromTxId, inProgressOk) should suppress the
+   * NoNodeException and continue. HDFS-3441.
+   */
+  @Test
+  public void testEditLogFileNotExistsWhenReadingMetadata() throws Exception {
+    URI uri = BKJMUtil.createJournalURI("/hdfsjournal-editlogfile");
+    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri);
+    try {
+      // start new inprogress log segment with txid=1
+      // and write transactions till txid=50
+      String zkpath1 = startAndFinalizeLogSegment(bkjm, 1, 50);
+
+      // start new inprogress log segment with txid=51
+      // and write transactions till txid=100
+      String zkpath2 = startAndFinalizeLogSegment(bkjm, 51, 100);
+
+      // Read the metadata from ZK, simulating the situation where the edit
+      // log metadata is removed by the purger thread while it is being read.
+      ZooKeeper zkspy = spy(BKJMUtil.connectZooKeeper());
+      bkjm.setZooKeeper(zkspy);
+      Mockito.doThrow(
+          new KeeperException.NoNodeException(zkpath2 + " doesn't exist"))
+          .when(zkspy).getData(zkpath2, false, null);
+
+      List<EditLogLedgerMetadata> ledgerList = bkjm.getLedgerList(false);
+      assertEquals("List contains the metadata of a non-existent path.", 1,
+          ledgerList.size());
+      assertEquals("LogLedgerMetadata contains wrong zk paths.", zkpath1,
+          ledgerList.get(0).getZkPath());
+    } finally {
+      bkjm.close();
+    }
+  }
+
+  private String startAndFinalizeLogSegment(BookKeeperJournalManager bkjm,
+      int startTxid, int endTxid) throws IOException, KeeperException,
+      InterruptedException {
+    EditLogOutputStream out = bkjm.startLogSegment(startTxid);
+    for (long i = startTxid; i <= endTxid; i++) {
+      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
+      op.setTransactionId(i);
+      out.write(op);
+    }
+    out.close();
+    // finalize the inprogress_1 log segment.
+    bkjm.finalizeLogSegment(startTxid, endTxid);
+    String zkpath1 = bkjm.finalizedLedgerZNode(startTxid, endTxid);
+    assertNotNull(zkc.exists(zkpath1, false));
+    assertNull(zkc.exists(bkjm.inprogressZNode(startTxid), false));
+    return zkpath1;
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java
new file mode 100644
index 0000000..00497b7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.contrib.bkjournal;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.bookkeeper.util.LocalBookKeeper;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.server.NIOServerCnxnFactory;
+import org.apache.zookeeper.server.ZooKeeperServer;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests the read, update, and clear APIs of CurrentInprogress.
+ */
+public class TestCurrentInprogress {
+  private static final Log LOG = LogFactory.getLog(TestCurrentInprogress.class);
+  private static final String CURRENT_NODE_PATH = "/test";
+  private static final String HOSTPORT = "127.0.0.1:2181";
+  private static final int CONNECTION_TIMEOUT = 30000;
+  private static NIOServerCnxnFactory serverFactory;
+  private static ZooKeeperServer zks;
+  private static ZooKeeper zkc;
+  private static int ZooKeeperDefaultPort = 2181;
+  private static File zkTmpDir;
+
+  private static ZooKeeper connectZooKeeper(String ensemble)
+      throws IOException, KeeperException, InterruptedException {
+    final CountDownLatch latch = new CountDownLatch(1);
+
+    ZooKeeper zkc = new ZooKeeper(HOSTPORT, 3600, new Watcher() {
+      public void process(WatchedEvent event) {
+        if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
+          latch.countDown();
+        }
+      }
+    });
+    if (!latch.await(10, TimeUnit.SECONDS)) {
+      throw new IOException("Zookeeper took too long to connect");
+    }
+    return zkc;
+  }
+
+  @BeforeClass
+  public static void setupZooKeeper() throws Exception {
+    LOG.info("Starting ZK server");
+    zkTmpDir = File.createTempFile("zookeeper", "test");
+    zkTmpDir.delete();
+    zkTmpDir.mkdir();
+    try {
+      zks = new ZooKeeperServer(zkTmpDir, zkTmpDir, ZooKeeperDefaultPort);
+      serverFactory = new NIOServerCnxnFactory();
+      serverFactory.configure(new InetSocketAddress(ZooKeeperDefaultPort), 10);
+      serverFactory.startup(zks);
+    } catch (Exception e) {
+      LOG.error("Exception while instantiating ZooKeeper", e);
+    }
+    boolean b = LocalBookKeeper.waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT);
+    LOG.debug("ZooKeeper server up: " + b);
+  }
+
+  @AfterClass
+  public static void shutDownServer() {
+    if (null != zks) {
+      zks.shutdown();
+    }
+    zkTmpDir.delete();
+  }
+
+  @Before
+  public void setup() throws Exception {
+    zkc = connectZooKeeper(HOSTPORT);
+  }
+
+  @After
+  public void teardown() throws Exception {
+    if (null != zkc) {
+      zkc.close();
+    }
+
+  }
+
+  /**
+   * Tests that read returns the data that was written with the update API.
+   */
+  @Test
+  public void testReadShouldReturnTheZnodePathAfterUpdate() throws Exception {
+    String data = "inprogressNode";
+    CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.update(data);
+    String inprogressNodePath = ci.read();
+    assertEquals("Not returning inprogressZnode", "inprogressNode",
+        inprogressNodePath);
+  }
+
+  /**
+   * Tests that read returns null after the data in the CurrentInprogress
+   * node has been cleared.
+   */
+  @Test
+  public void testReadShouldReturnNullAfterClear() throws Exception {
+    CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.update("myInprogressZnode");
+    ci.read();
+    ci.clear();
+    String inprogressNodePath = ci.read();
+    assertEquals("Expecting null to be returned", null, inprogressNodePath);
+  }
+
+  /**
+   * Tests that update throws an IOException if the version number changes
+   * between read and update.
+   */
+  @Test(expected = IOException.class)
+  public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead()
+      throws Exception {
+    CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.update("myInprogressZnode");
+    assertEquals("Not returning myInprogressZnode", "myInprogressZnode", ci
+        .read());
+    // Update the data in between so that the version number changes
+    ci.update("YourInprogressZnode");
+    ci.update("myInprogressZnode");
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
index 41f0292..0889bcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
@@ -34,6 +34,11 @@
   public static long countTransactionsInStream(EditLogInputStream in) 
       throws IOException {
     FSEditLogLoader.EditLogValidation validation = FSEditLogLoader.validateEditLog(in);
-    return validation.getNumTransactions();
+    return (validation.getEndTxId() - in.getFirstTxId()) + 1;
+  }
+
+  public static void setRuntimeForEditLog(NameNode nn, Runtime rt) {
+    nn.setRuntimeForTesting(rt);
+    nn.getFSImage().getEditLog().setRuntimeForTesting(rt);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/pom.xml
index 0e61d8d..db27034 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/pom.xml
@@ -14,7 +14,10 @@
 
 
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_impls_truncate.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_impls_truncate.c
index 86cda78..7357b4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_impls_truncate.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_impls_truncate.c
@@ -37,7 +37,7 @@
   assert(dfs);
 
   if (size != 0) {
-    return -ENOTSUP;
+    return 0;
   }
 
   int ret = dfs_unlink(path);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 40b3704..06b8b5a 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -30,6 +30,7 @@
   echo "  namenode -format     format the DFS filesystem"
   echo "  secondarynamenode    run the DFS secondary namenode"
   echo "  namenode             run the DFS namenode"
+  echo "  zkfc                 run the ZK Failover Controller daemon"
   echo "  datanode             run a DFS datanode"
   echo "  dfsadmin             run a DFS admin client"
   echo "  haadmin              run a DFS HA admin client"
@@ -56,21 +57,29 @@
 
 # Determine if we're starting a secure datanode, and if so, redefine appropriate variables
 if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
-    HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
+  if [ -n "$JSVC_HOME" ]; then
+    if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
+      HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
+    fi
+  
+    if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
+      HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
+    fi
+   
+    HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+    starting_secure_dn="true"
+  else
+    echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\
+      "isn't set. Falling back to starting insecure DN."
   fi
-
-  if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
-    HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
-  fi
- 
-  HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
-  starting_secure_dn="true"
 fi
 
 if [ "$COMMAND" = "namenode" ] ; then
   CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
   HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
+elif [ "$COMMAND" = "zkfc" ] ; then
+  CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ZKFC_OPTS"
 elif [ "$COMMAND" = "secondarynamenode" ] ; then
   CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
   HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
@@ -125,12 +134,12 @@
   if [ "$HADOOP_PID_DIR" = "" ]; then
     HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
   else
-   HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
+    HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
   fi
 
   JSVC=$JSVC_HOME/jsvc
   if [ ! -f $JSVC ]; then
-    echo "JSVC_HOME is not set correctly so jsvc can not be found. Jsvc is required to run secure datanodes. "
+    echo "JSVC_HOME is not set correctly so jsvc cannot be found. Jsvc is required to run secure datanodes. "
     echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
       "and set JSVC_HOME to the directory containing the jsvc binary."
     exit
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
index 72d9e90..0d41e55 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
@@ -85,4 +85,15 @@
       --script "$bin/hdfs" start secondarynamenode
 fi
 
+#---------------------------------------------------------
+# ZK Failover controllers, if auto-HA is enabled
+AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
+if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
+  echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]"
+  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --hostnames "$NAMENODES" \
+    --script "$bin/hdfs" start zkfc
+fi
+
 # eof
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f82f5a5..d132db7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -107,6 +107,8 @@
   public static final long    DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT = 40000;
   public static final String  DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval";
   public static final int     DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000;
+  public static final String  DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY = "dfs.namenode.tolerate.heartbeat.multiplier";
+  public static final int     DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT = 4;
   public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";
   public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-client.xml";
   public static final String  DFS_CLIENT_HTTPS_NEED_AUTH_KEY = "dfs.client.https.need-auth";
@@ -334,8 +336,8 @@
   public static final String  DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
   public static final int     DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
   
-  public static final String  DFS_FEDERATION_NAMESERVICES = "dfs.federation.nameservices";
-  public static final String  DFS_FEDERATION_NAMESERVICE_ID = "dfs.federation.nameservice.id";
+  public static final String  DFS_NAMESERVICES = "dfs.nameservices";
+  public static final String  DFS_NAMESERVICE_ID = "dfs.nameservice.id";
   public static final String  DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY = "dfs.namenode.resource.check.interval";
   public static final int     DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT = 5000;
   public static final String  DFS_NAMENODE_DU_RESERVED_KEY = "dfs.namenode.resource.du.reserved";
@@ -358,4 +360,8 @@
   public static final String DFS_HA_TAILEDITS_PERIOD_KEY = "dfs.ha.tail-edits.period";
   public static final int DFS_HA_TAILEDITS_PERIOD_DEFAULT = 60; // 1m
   public static final String DFS_HA_FENCE_METHODS_KEY = "dfs.ha.fencing.methods";
+  public static final String DFS_HA_AUTO_FAILOVER_ENABLED_KEY = "dfs.ha.automatic-failover.enabled";
+  public static final boolean DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT = false;
+  public static final String DFS_HA_ZKFC_PORT_KEY = "dfs.ha.zkfc.port";
+  public static final int DFS_HA_ZKFC_PORT_DEFAULT = 8019;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 4dc0f09..f7d6fdc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -129,11 +129,13 @@
   private long initialFileSize = 0; // at time of file open
   private Progressable progress;
   private final short blockReplication; // replication factor of file
+  private boolean shouldSyncBlock = false; // force blocks to disk upon close
   
   private class Packet {
     long    seqno;               // sequencenumber of buffer in block
     long    offsetInBlock;       // offset in block
-    boolean lastPacketInBlock;   // is this the last packet in block?
+    private boolean lastPacketInBlock;   // is this the last packet in block?
+    boolean syncBlock;          // this packet forces the current block to disk
     int     numChunks;           // number of chunks currently in packet
     int     maxChunks;           // max chunks in packet
 
@@ -245,7 +247,7 @@
       buffer.mark();
 
       PacketHeader header = new PacketHeader(
-        pktLen, offsetInBlock, seqno, lastPacketInBlock, dataLen);
+        pktLen, offsetInBlock, seqno, lastPacketInBlock, dataLen, syncBlock);
       header.putInBuffer(buffer);
       
       buffer.reset();
@@ -507,8 +509,15 @@
           }
 
           // write out data to remote datanode
-          blockStream.write(buf.array(), buf.position(), buf.remaining());
-          blockStream.flush();
+          try {            
+            blockStream.write(buf.array(), buf.position(), buf.remaining());
+            blockStream.flush();   
+          } catch (IOException e) {
+            // HDFS-3398: treat the primary DN as down, since the client is
+            // unable to write to it
+            errorIndex = 0;
+            throw e;
+          }
           lastPacket = System.currentTimeMillis();
           
           if (one.isHeartbeatPacket()) {  //heartbeat packet
@@ -965,6 +974,7 @@
       DatanodeInfo[] nodes = null;
       int count = dfsClient.getConf().nBlockWriteRetry;
       boolean success = false;
+      ExtendedBlock oldBlock = block;
       do {
         hasError = false;
         lastException = null;
@@ -972,9 +982,11 @@
         success = false;
 
         long startTime = System.currentTimeMillis();
-        DatanodeInfo[] w = excludedNodes.toArray(
+        DatanodeInfo[] excluded = excludedNodes.toArray(
             new DatanodeInfo[excludedNodes.size()]);
-        lb = locateFollowingBlock(startTime, w.length > 0 ? w : null);
+        block = oldBlock;
+        lb = locateFollowingBlock(startTime,
+            excluded.length > 0 ? excluded : null);
         block = lb.getBlock();
         block.setNumBytes(0);
         accessToken = lb.getBlockToken();
@@ -1239,6 +1251,7 @@
       long blockSize, Progressable progress, int buffersize,
       DataChecksum checksum) throws IOException {
     this(dfsClient, src, blockSize, progress, checksum, replication);
+    this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK);
 
     computePacketChunkSize(dfsClient.getConf().writePacketSize,
         checksum.getBytesPerChecksum());
@@ -1421,6 +1434,7 @@
         currentPacket = new Packet(PacketHeader.PKT_HEADER_LEN, 0, 
             bytesCurBlock);
         currentPacket.lastPacketInBlock = true;
+        currentPacket.syncBlock = shouldSyncBlock;
         waitAndQueueCurrentPacket();
         bytesCurBlock = 0;
         lastFlushOffset = 0;
@@ -1440,6 +1454,24 @@
    */
   @Override
   public void hflush() throws IOException {
+    flushOrSync(false);
+  }
+
+  /**
+   * The expected semantics are that all data has been flushed out to all
+   * replicas and all replicas have done the POSIX fsync equivalent, i.e. the
+   * OS has flushed it to the disk device (but the disk may have it in its cache).
+   * 
+   * Note that only the current block is flushed to the disk device.
+   * To guarantee durable sync across block boundaries the stream should
+   * be created with {@link CreateFlag#SYNC_BLOCK}.
+   */
+  @Override
+  public void hsync() throws IOException {
+    flushOrSync(true);
+  }
+
+  private void flushOrSync(boolean isSync) throws IOException {
     dfsClient.checkOpen();
     isClosed();
     try {
@@ -1467,7 +1499,13 @@
           assert bytesCurBlock > lastFlushOffset;
           // record the valid offset of this flush
           lastFlushOffset = bytesCurBlock;
-          waitAndQueueCurrentPacket();
+          if (isSync && currentPacket == null) {
+            // Nothing to send right now,
+            // but sync was requested.
+            // Send an empty packet
+            currentPacket = new Packet(packetSize, chunksPerPacket,
+                bytesCurBlock);
+          }
         } else {
           // We already flushed up to this offset.
           // This means that we haven't written anything since the last flush
@@ -1477,8 +1515,21 @@
           assert oldCurrentPacket == null :
             "Empty flush should not occur with a currentPacket";
 
-          // just discard the current packet since it is already been sent.
-          currentPacket = null;
+          if (isSync && bytesCurBlock > 0) {
+            // Nothing to send right now,
+            // and the block was partially written,
+            // and sync was requested.
+            // So send an empty sync packet.
+            currentPacket = new Packet(packetSize, chunksPerPacket,
+                bytesCurBlock);
+          } else {
+            // just discard the current packet since it has already been sent.
+            currentPacket = null;
+          }
+        }
+        if (currentPacket != null) {
+          currentPacket.syncBlock = isSync;
+          waitAndQueueCurrentPacket();          
         }
         // Restore state of stream. Record the last flush offset 
         // of the last full chunk that was flushed.
@@ -1530,18 +1581,6 @@
   }
 
   /**
-   * The expected semantics is all data have flushed out to all replicas 
-   * and all replicas have done posix fsync equivalent - ie the OS has 
-   * flushed it to the disk device (but the disk may have it in its cache).
-   * 
-   * Right now by default it is implemented as hflush
-   */
-  @Override
-  public synchronized void hsync() throws IOException {
-    hflush();
-  }
-
-  /**
    * @deprecated use {@link HdfsDataOutputStream#getCurrentBlockReplication()}.
    */
   @Deprecated
@@ -1665,6 +1704,7 @@
         currentPacket = new Packet(PacketHeader.PKT_HEADER_LEN, 0, 
             bytesCurBlock);
         currentPacket.lastPacketInBlock = true;
+        currentPacket.syncBlock = shouldSyncBlock;
       }
 
       flushInternal();             // flush all data to Datanodes
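The hsync()/SYNC_BLOCK semantics described in the javadoc above can be exercised from the client side roughly as follows. This is a minimal sketch, not part of the patch: it assumes the generic FileSystem.create(Path, FsPermission, EnumSet<CreateFlag>, int, short, long, Progressable) overload and FSDataOutputStream.hsync(); the path, permission, buffer size, and replication values are illustrative only.

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class HsyncExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        FileSystem fs = FileSystem.get(conf);
        FSDataOutputStream out = fs.create(new Path("/tmp/durable.log"),
            FsPermission.getDefault(),
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
            4096, (short) 3, fs.getDefaultBlockSize(), null);
        out.write("edit-record".getBytes());
        out.hsync();   // data of the current block is fsync'ed on every replica
        out.close();   // with SYNC_BLOCK, the completed block is also synced on close
      }
    }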
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 0a8751d..dc266fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -42,6 +42,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -288,7 +289,7 @@
    * @return collection of nameservice Ids, or null if not specified
    */
   public static Collection<String> getNameServiceIds(Configuration conf) {
-    return conf.getTrimmedStringCollection(DFS_FEDERATION_NAMESERVICES);
+    return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
   }
 
   /**
@@ -609,6 +610,14 @@
   public static Collection<URI> getNameServiceUris(Configuration conf,
       String... keys) {
     Set<URI> ret = new HashSet<URI>();
+    
+    // We're passed multiple possible configuration keys for any given NN or HA
+    // nameservice, and we search the config in order of these keys. To make
+    // sure that a later config lookup (e.g. fs.defaultFS) doesn't add a URI
+    // for a key for which we've already found a preferred entry, we keep
+    // track of the URIs found under non-preferred keys here.
+    Set<URI> nonPreferredUris = new HashSet<URI>();
+    
     for (String nsId : getNameServiceIds(conf)) {
       if (HAUtil.isHAEnabled(conf, nsId)) {
         // Add the logical URI of the nameservice.
@@ -619,24 +628,46 @@
         }
       } else {
         // Add the URI corresponding to the address of the NN.
+        boolean uriFound = false;
         for (String key : keys) {
           String addr = conf.get(concatSuffixes(key, nsId));
           if (addr != null) {
-            ret.add(createUri(HdfsConstants.HDFS_URI_SCHEME,
-                NetUtils.createSocketAddr(addr)));
-            break;
+            URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME,
+                NetUtils.createSocketAddr(addr));
+            if (!uriFound) {
+              uriFound = true;
+              ret.add(uri);
+            } else {
+              nonPreferredUris.add(uri);
+            }
           }
         }
       }
     }
+    
     // Add the generic configuration keys.
+    boolean uriFound = false;
     for (String key : keys) {
       String addr = conf.get(key);
       if (addr != null) {
-        ret.add(createUri("hdfs", NetUtils.createSocketAddr(addr)));
-        break;
+        URI uri = createUri("hdfs", NetUtils.createSocketAddr(addr));
+        if (!uriFound) {
+          uriFound = true;
+          ret.add(uri);
+        } else {
+          nonPreferredUris.add(uri);
+        }
       }
     }
+    
+    // Add the default URI if it is an HDFS URI.
+    URI defaultUri = FileSystem.getDefaultUri(conf);
+    if (defaultUri != null &&
+        HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
+        !nonPreferredUris.contains(defaultUri)) {
+      ret.add(defaultUri);
+    }
+    
     return ret;
   }
 
@@ -676,9 +707,10 @@
    * @param httpsAddress -If true, and if security is enabled, returns server 
    *                      https address. If false, returns server http address.
    * @return server http or https address
+   * @throws IOException 
    */
-  public static String getInfoServer(
-      InetSocketAddress namenodeAddr, Configuration conf, boolean httpsAddress) {
+  public static String getInfoServer(InetSocketAddress namenodeAddr,
+      Configuration conf, boolean httpsAddress) throws IOException {
     boolean securityOn = UserGroupInformation.isSecurityEnabled();
     String httpAddressKey = (securityOn && httpsAddress) ? 
         DFS_NAMENODE_HTTPS_ADDRESS_KEY : DFS_NAMENODE_HTTP_ADDRESS_KEY;
@@ -695,8 +727,14 @@
     } else {
       suffixes = new String[2];
     }
-
-    return getSuffixedConf(conf, httpAddressKey, httpAddressDefault, suffixes);
+    String configuredInfoAddr = getSuffixedConf(conf, httpAddressKey,
+        httpAddressDefault, suffixes);
+    if (namenodeAddr != null) {
+      return substituteForWildcardAddress(configuredInfoAddr,
+          namenodeAddr.getHostName());
+    } else {
+      return configuredInfoAddr;
+    }
   }
   
 
@@ -721,7 +759,7 @@
       if (UserGroupInformation.isSecurityEnabled() &&
           defaultSockAddr.getAddress().isAnyLocalAddress()) {
         throw new IOException("Cannot use a wildcard address with security. " +
-                              "Must explicitly set bind address for Kerberos");
+            "Must explicitly set bind address for Kerberos");
       }
       return defaultHost + ":" + sockAddr.getPort();
     } else {
@@ -843,7 +881,7 @@
    * Get the nameservice Id by matching the {@code addressKey} with the
    * the address of the local node. 
    * 
-   * If {@link DFSConfigKeys#DFS_FEDERATION_NAMESERVICE_ID} is not specifically
+   * If {@link DFSConfigKeys#DFS_NAMESERVICE_ID} is not specifically
    * configured, and more than one nameservice Id is configured, this method 
    * determines the nameservice Id by matching the local node's address with the
    * configured addresses. When a match is found, it returns the nameservice Id
@@ -855,7 +893,7 @@
    * @throws HadoopIllegalArgumentException on error
    */
   private static String getNameServiceId(Configuration conf, String addressKey) {
-    String nameserviceId = conf.get(DFS_FEDERATION_NAMESERVICE_ID);
+    String nameserviceId = conf.get(DFS_NAMESERVICE_ID);
     if (nameserviceId != null) {
       return nameserviceId;
     }
@@ -927,7 +965,7 @@
     if (found > 1) { // Only one address must match the local address
       String msg = "Configuration has multiple addresses that match "
           + "local node's address. Please configure the system with "
-          + DFS_FEDERATION_NAMESERVICE_ID + " and "
+          + DFS_NAMESERVICE_ID + " and "
           + DFS_HA_NAMENODE_ID_KEY;
       throw new HadoopIllegalArgumentException(msg);
     }
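To illustrate the preferred/non-preferred key handling in the updated getNameServiceUris(), here is a hedged configuration sketch. The per-nameservice RPC address key name "dfs.namenode.rpc-address", the host name, and the printed result are assumptions for illustration and are not taken from this hunk.

    import java.net.URI;
    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class NameServiceUrisExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
        conf.set("dfs.namenode.rpc-address.ns1", "nn1.example.com:8020");
        conf.set("fs.defaultFS", "hdfs://nn1.example.com:8020");
        // The per-nameservice rpc-address entry is found first (preferred); the
        // default FS resolves to the same hdfs:// URI, so the set stays single-element.
        Collection<URI> uris = DFSUtil.getNameServiceUris(conf,
            "dfs.namenode.rpc-address");
        System.out.println(uris);  // e.g. [hdfs://nn1.example.com:8020]
      }
    }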
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 0cb95c0..1de353b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -223,12 +223,19 @@
 
   @Override
   public HdfsDataOutputStream create(Path f, FsPermission permission,
-    boolean overwrite, int bufferSize, short replication, long blockSize,
+      boolean overwrite, int bufferSize, short replication, long blockSize,
+      Progressable progress) throws IOException {
+    return create(f, permission,
+        overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
+            : EnumSet.of(CreateFlag.CREATE), bufferSize, replication,
+        blockSize, progress);
+  }
+  
+  @Override
+  public HdfsDataOutputStream create(Path f, FsPermission permission,
+    EnumSet<CreateFlag> cflags, int bufferSize, short replication, long blockSize,
     Progressable progress) throws IOException {
     statistics.incrementWriteOps(1);
-    final EnumSet<CreateFlag> cflags = overwrite?
-        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
-        : EnumSet.of(CreateFlag.CREATE);
     final DFSOutputStream out = dfs.create(getPathName(f), permission, cflags,
         replication, blockSize, progress, bufferSize);
     return new HdfsDataOutputStream(out, statistics);
@@ -249,6 +256,7 @@
   /**
    * Same as create(), except fails if parent directory doesn't already exist.
    */
+  @Override
   public HdfsDataOutputStream createNonRecursive(Path f, FsPermission permission,
       EnumSet<CreateFlag> flag, int bufferSize, short replication,
       long blockSize, Progressable progress) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index b568925..77f0597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -142,7 +142,7 @@
     Preconditions.checkArgument(nsId != null,
         "Could not determine namespace id. Please ensure that this " +
         "machine is one of the machines listed as a NN RPC address, " +
-        "or configure " + DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID);
+        "or configure " + DFSConfigKeys.DFS_NAMESERVICE_ID);
     
     Collection<String> nnIds = DFSUtil.getNameNodeIds(myConf, nsId);
     String myNNId = myConf.get(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
index 6e21245..e8e80a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
@@ -20,6 +20,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.ZKFCProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -47,6 +48,8 @@
     new Service("security.namenode.protocol.acl", NamenodeProtocol.class),
     new Service(CommonConfigurationKeys.SECURITY_HA_SERVICE_PROTOCOL_ACL,
         HAServiceProtocol.class),
+    new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL,
+        ZKFCProtocol.class),
     new Service(
         CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY, 
         RefreshAuthorizationPolicyProtocol.class),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
index 1454fdb..022cf58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
@@ -63,7 +63,7 @@
   }
 
   private static void deprecate(String oldKey, String newKey) {
-    Configuration.addDeprecation(oldKey, new String[]{newKey});
+    Configuration.addDeprecation(oldKey, newKey);
   }
 
   private static void addDeprecatedKeys() {
@@ -102,5 +102,7 @@
     deprecate("dfs.block.size", DFSConfigKeys.DFS_BLOCK_SIZE_KEY);
     deprecate("dfs.datanode.max.xcievers", DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY);
     deprecate("io.bytes.per.checksum", DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY);
+    deprecate("dfs.federation.nameservices", DFSConfigKeys.DFS_NAMESERVICES);
+    deprecate("dfs.federation.nameservice.id", DFSConfigKeys.DFS_NAMESERVICE_ID);
   }
 }
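The effect of the two new deprecations is sketched below, assuming the usual Configuration deprecation behaviour where a value set under the old key becomes visible under the new one (with a deprecation warning logged); this snippet is illustrative and not part of the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DeprecatedKeyExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        conf.set("dfs.federation.nameservices", "ns1,ns2");       // old, deprecated key
        // Resolves through the deprecation mapping registered in HdfsConfiguration.
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMESERVICES));  // ns1,ns2
      }
    }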
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
index 33c86f9..20324be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
@@ -214,6 +214,17 @@
     }
     return compareTo((Block)o) == 0;
   }
+  
+  /**
+   * @return true if the two blocks have the same block ID and the same
+   * generation stamp, or if both blocks are null.
+   */
+  public static boolean matchingIdAndGenStamp(Block a, Block b) {
+    if (a == b) return true; // same block, or both null
+    if (a == null || b == null) return false; // only one null
+    return a.blockId == b.blockId &&
+           a.generationStamp == b.generationStamp;
+  }
 
   @Override // Object
   public int hashCode() {
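A small sketch of how the new null-safe helper behaves, assuming the existing Block(long blockId, long numBytes, long generationStamp) constructor; the numeric values are illustrative only.

    import org.apache.hadoop.hdfs.protocol.Block;

    public class MatchingIdAndGenStampExample {
      public static void main(String[] args) {
        Block a = new Block(1000L, 10L, 1L);  // id=1000, len=10, genstamp=1
        Block b = new Block(1000L, 20L, 1L);  // same id and genstamp, different length
        Block c = new Block(1000L, 10L, 2L);  // same id, newer genstamp
        System.out.println(Block.matchingIdAndGenStamp(a, b));        // true: length is ignored
        System.out.println(Block.matchingIdAndGenStamp(a, c));        // false: genstamps differ
        System.out.println(Block.matchingIdAndGenStamp(null, null));  // true: both null
        System.out.println(Block.matchingIdAndGenStamp(a, null));     // false: only one is null
      }
    }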
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 26f309a..9a02465 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -309,6 +309,7 @@
    * @throws UnresolvedLinkException If <code>src</code> contains a symlink
    * @throws IOException If an I/O error occurred
    */
+  @Idempotent
   public LocatedBlock addBlock(String src, String clientName,
       ExtendedBlock previous, DatanodeInfo[] excludeNodes)
       throws AccessControlException, FileNotFoundException,
@@ -362,6 +363,7 @@
    * @throws UnresolvedLinkException If <code>src</code> contains a symlink 
    * @throws IOException If an I/O error occurred
    */
+  @Idempotent
   public boolean complete(String src, String clientName, ExtendedBlock last)
       throws AccessControlException, FileNotFoundException, SafeModeException,
       UnresolvedLinkException, IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index 464fd614d2..f4c0715 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -20,7 +20,6 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 /**
  * This class represents the primary identifier for a Datanode.
@@ -45,23 +44,6 @@
   protected int infoPort;      // info server port
   protected int ipcPort;       // IPC server port
 
-  public DatanodeID(String ipAddr, int xferPort) {
-    this(ipAddr, "", "", xferPort,
-        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
-  }
-
-  public DatanodeID(String ipAddr, String hostName, int xferPort) {
-    this(ipAddr, hostName, "", xferPort,
-        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
-  }
-
-  /**
-   * DatanodeID copy constructor
-   * 
-   * @param from
-   */
   public DatanodeID(DatanodeID from) {
     this(from.getIpAddr(),
         from.getHostName(),
@@ -72,7 +54,7 @@
   }
   
   /**
-   * Create DatanodeID
+   * Create a DatanodeID
    * @param ipAddr IP
    * @param hostName hostname
    * @param storageID data storage ID
@@ -94,22 +76,6 @@
     this.ipAddr = ipAddr;
   }
 
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
-  public void setXferPort(int xferPort) {
-    this.xferPort = xferPort;
-  }
-
-  public void setInfoPort(int infoPort) {
-    this.infoPort = infoPort;
-  }
-  
-  public void setIpcPort(int ipcPort) {
-    this.ipcPort = ipcPort;
-  }
-
   public void setStorageID(String storageID) {
     this.storageID = storageID;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index c905e53..d9da5b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -22,11 +22,11 @@
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
 
-/****************************************************
- * A LocatedBlock is a pair of Block, DatanodeInfo[]
- * objects.  It tells where to find a Block.
- * 
- ****************************************************/
+/**
+ * Associates a block with the Datanodes that contain its replicas
+ * and other block metadata (E.g. the file offset associated with this
+ * block, whether it is corrupt, security token, etc).
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class LocatedBlock {
@@ -40,19 +40,6 @@
   private boolean corrupt;
   private Token<BlockTokenIdentifier> blockToken = new Token<BlockTokenIdentifier>();
 
-  public LocatedBlock() {
-    this(new ExtendedBlock(), new DatanodeInfo[0], 0L, false);
-  }
-  
-
-  public LocatedBlock(ExtendedBlock eb) {
-    this(eb, new DatanodeInfo[0], 0L, false);
-  }
-
-  public LocatedBlock(String bpid, Block b, DatanodeInfo[] locs) {
-    this(new ExtendedBlock(bpid, b), locs, -1, false); // startOffset is unknown
-  }
-
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs) {
     this(b, locs, -1, false); // startOffset is unknown
   }
@@ -81,14 +68,10 @@
     this.blockToken = token;
   }
 
-  /**
-   */
   public ExtendedBlock getBlock() {
     return b;
   }
 
-  /**
-   */
   public DatanodeInfo[] getLocations() {
     return locs;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
index 72aa47f..fbe8690 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
@@ -105,8 +105,9 @@
    * @return block if found, or null otherwise.
    */
   public int findBlock(long offset) {
-    // create fake block of size 1 as a key
-    LocatedBlock key = new LocatedBlock();
+    // create a fake block to use as a search key
+    LocatedBlock key = new LocatedBlock(
+        new ExtendedBlock(), new DatanodeInfo[0], 0L, false);
     key.setStartOffset(offset);
     key.getBlock().setNumBytes(1);
     Comparator<LocatedBlock> comp = 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
index d8b9f2b..083e2b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
@@ -40,6 +40,7 @@
       .setSeqno(0)
       .setLastPacketInBlock(false)
       .setDataLen(0)
+      .setSyncBlock(false)
       .build().getSerializedSize();
   public static final int PKT_HEADER_LEN =
     6 + PROTO_SIZE;
@@ -51,13 +52,14 @@
   }
 
   public PacketHeader(int packetLen, long offsetInBlock, long seqno,
-                      boolean lastPacketInBlock, int dataLen) {
+                      boolean lastPacketInBlock, int dataLen, boolean syncBlock) {
     this.packetLen = packetLen;
     proto = PacketHeaderProto.newBuilder()
       .setOffsetInBlock(offsetInBlock)
       .setSeqno(seqno)
       .setLastPacketInBlock(lastPacketInBlock)
       .setDataLen(dataLen)
+      .setSyncBlock(syncBlock)
       .build();
   }
 
@@ -81,6 +83,10 @@
     return packetLen;
   }
 
+  public boolean getSyncBlock() {
+    return proto.getSyncBlock();
+  }
+
   @Override
   public String toString() {
     return "PacketHeader with packetLen=" + packetLen +
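A brief sketch of constructing a header for a sync packet with the extended constructor above; the length, offset, and sequence-number values are placeholders, not taken from the patch.

    import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

    public class SyncPacketHeaderExample {
      public static void main(String[] args) {
        int dataLen = 512;
        int pktLen = PacketHeader.PKT_HEADER_LEN + dataLen;
        PacketHeader header = new PacketHeader(pktLen, 0L /* offsetInBlock */,
            1L /* seqno */, false /* lastPacketInBlock */, dataLen,
            true /* syncBlock: receiving datanodes should fsync this block */);
        System.out.println(header.getSyncBlock());  // true
      }
    }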
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolPB.java
index 738ae04..542a1f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolPB.java
@@ -20,9 +20,13 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
 import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
 
+@KerberosInfo(
+    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
 @ProtocolInfo(
     protocolName = "org.apache.hadoop.tools.GetUserMappingsProtocol", 
     protocolVersion = 1)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index bcb344d..c3466e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -32,6 +32,8 @@
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
@@ -104,6 +106,20 @@
     }
     return GetTransactionIdResponseProto.newBuilder().setTxId(txid).build();
   }
+  
+  @Override
+  public GetMostRecentCheckpointTxIdResponseProto getMostRecentCheckpointTxId(
+      RpcController unused, GetMostRecentCheckpointTxIdRequestProto request)
+      throws ServiceException {
+    long txid;
+    try {
+      txid = impl.getMostRecentCheckpointTxId();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return GetMostRecentCheckpointTxIdResponseProto.newBuilder().setTxId(txid).build();
+  }
+
 
   @Override
   public RollEditLogResponseProto rollEditLog(RpcController unused,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
index b802457..f48e994 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
@@ -120,6 +121,16 @@
   }
 
   @Override
+  public long getMostRecentCheckpointTxId() throws IOException {
+    try {
+      return rpcProxy.getMostRecentCheckpointTxId(NULL_CONTROLLER,
+          GetMostRecentCheckpointTxIdRequestProto.getDefaultInstance()).getTxId();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
   public CheckpointSignature rollEditLog() throws IOException {
     try {
       return PBHelper.convert(rpcProxy.rollEditLog(NULL_CONTROLLER,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 92b7858..93fe249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -254,11 +254,11 @@
   public static BlockWithLocationsProto convert(BlockWithLocations blk) {
     return BlockWithLocationsProto.newBuilder()
         .setBlock(convert(blk.getBlock()))
-        .addAllDatanodeIDs(Arrays.asList(blk.getDatanodes())).build();
+        .addAllStorageIDs(Arrays.asList(blk.getStorageIDs())).build();
   }
 
   public static BlockWithLocations convert(BlockWithLocationsProto b) {
-    return new BlockWithLocations(convert(b.getBlock()), b.getDatanodeIDsList()
+    return new BlockWithLocations(convert(b.getBlock()), b.getStorageIDsList()
         .toArray(new String[0]));
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index a63c3b8..0576f89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -205,6 +205,7 @@
   private Map<Block, BalancerBlock> globalBlockList
                  = new HashMap<Block, BalancerBlock>();
   private MovedBlocks movedBlocks = new MovedBlocks();
+  // Map storage IDs to BalancerDatanodes
   private Map<String, BalancerDatanode> datanodes
                  = new HashMap<String, BalancerDatanode>();
   
@@ -262,9 +263,9 @@
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Decided to move block "+ block.getBlockId()
                     +" with a length of "+StringUtils.byteDesc(block.getNumBytes())
-                    + " bytes from " + source.getName() 
-                    + " to " + target.getName()
-                    + " using proxy source " + proxySource.getName() );
+                    + " bytes from " + source.getDisplayName()
+                    + " to " + target.getDisplayName()
+                    + " using proxy source " + proxySource.getDisplayName() );
               }
               return true;
             }
@@ -317,15 +318,15 @@
         receiveResponse(in);
         bytesMoved.inc(block.getNumBytes());
         LOG.info( "Moving block " + block.getBlock().getBlockId() +
-              " from "+ source.getName() + " to " +
-              target.getName() + " through " +
-              proxySource.getName() +
+              " from "+ source.getDisplayName() + " to " +
+              target.getDisplayName() + " through " +
+              proxySource.getDisplayName() +
               " is succeeded." );
       } catch (IOException e) {
         LOG.warn("Error moving block "+block.getBlockId()+
-            " from " + source.getName() + " to " +
-            target.getName() + " through " +
-            proxySource.getName() +
+            " from " + source.getDisplayName() + " to " +
+            target.getDisplayName() + " through " +
+            proxySource.getDisplayName() +
             ": "+e.getMessage());
       } finally {
         IOUtils.closeStream(out);
@@ -378,7 +379,8 @@
         public void run() {
           if (LOG.isDebugEnabled()) {
             LOG.debug("Starting moving "+ block.getBlockId() +
-                " from " + proxySource.getName() + " to " + target.getName());
+                " from " + proxySource.getDisplayName() + " to " +
+                target.getDisplayName());
           }
           dispatch();
         }
@@ -475,7 +477,7 @@
     
     @Override
     public String toString() {
-      return getClass().getSimpleName() + "[" + getName()
+      return getClass().getSimpleName() + "[" + datanode
           + ", utilization=" + utilization + "]";
     }
 
@@ -507,8 +509,8 @@
     }
     
     /** Get the name of the datanode */
-    protected String getName() {
-      return datanode.getName();
+    protected String getDisplayName() {
+      return datanode.toString();
     }
     
     /* Get the storage id of the datanode */
@@ -620,8 +622,8 @@
         
           synchronized (block) {
             // update locations
-            for ( String location : blk.getDatanodes() ) {
-              BalancerDatanode datanode = datanodes.get(location);
+            for ( String storageID : blk.getStorageIDs() ) {
+              BalancerDatanode datanode = datanodes.get(storageID);
               if (datanode != null) { // not an unknown datanode
                 block.addLocation(datanode);
               }
@@ -831,7 +833,7 @@
           this.aboveAvgUtilizedDatanodes.add((Source)datanodeS);
         } else {
           assert(isOverUtilized(datanodeS)) :
-            datanodeS.getName()+ "is not an overUtilized node";
+            datanodeS.getDisplayName() + " is not an overUtilized node";
           this.overUtilizedDatanodes.add((Source)datanodeS);
           overLoadedBytes += (long)((datanodeS.utilization-avg
               -threshold)*datanodeS.datanode.getCapacity()/100.0);
@@ -842,7 +844,7 @@
           this.belowAvgUtilizedDatanodes.add(datanodeS);
         } else {
           assert isUnderUtilized(datanodeS) : "isUnderUtilized("
-              + datanodeS.getName() + ")=" + isUnderUtilized(datanodeS)
+              + datanodeS.getDisplayName() + ")=" + isUnderUtilized(datanodeS)
               + ", utilization=" + datanodeS.utilization; 
           this.underUtilizedDatanodes.add(datanodeS);
           underLoadedBytes += (long)((avg-threshold-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index c4208b7..352b77b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -200,7 +200,7 @@
           Thread.sleep(keyUpdaterInterval);
         }
       } catch (InterruptedException e) {
-        LOG.info("InterruptedException in block key updater thread", e);
+        LOG.debug("InterruptedException in block key updater thread", e);
       } catch (Throwable e) {
         LOG.error("Exception in block key updater thread", e);
         shouldRun = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index f7c33ca..e3eecad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -19,9 +19,6 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.fs.ContentSummary;
 
 /** 
@@ -31,19 +28,24 @@
 public interface BlockCollection {
   /**
    * Get the last block of the collection.
-   * Make sure it has the right type.
    */
-  public <T extends BlockInfo> T getLastBlock() throws IOException;
+  public BlockInfo getLastBlock() throws IOException;
 
   /** 
    * Get content summary.
    */
   public ContentSummary computeContentSummary();
 
-  /** @return the number of blocks */ 
+  /**
+   * @return the number of blocks
+   */ 
   public int numBlocks();
 
+  /**
+   * Get the blocks.
+   */
   public BlockInfo[] getBlocks();
+
   /**
    * Get preferred block size for the collection 
    * @return preferred block size in bytes
@@ -57,7 +59,7 @@
   public short getReplication();
 
   /**
-   *  Get name of collection.
+   * Get the name of the collection.
    */
   public String getName();
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1568e23..527a997 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -437,7 +437,7 @@
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  private boolean commitBlock(final BlockInfoUnderConstruction block,
+  private static boolean commitBlock(final BlockInfoUnderConstruction block,
       final Block commitBlock) throws IOException {
     if (block.getBlockUCState() == BlockUCState.COMMITTED)
       return false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index a1e7a20..6995a2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.hdfs.server.common.Util.now;
+
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -25,6 +27,7 @@
 import java.util.Set;
 import java.util.TreeSet;
 
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -49,13 +52,19 @@
  */
 @InterfaceAudience.Private
 public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
+  private static final String enableDebugLogging =
+    "For more information, please enable DEBUG log level on "
+    + ((Log4JLogger)LOG).getLogger().getName();
+
   private boolean considerLoad; 
   private boolean preferLocalNode = true;
   private NetworkTopology clusterMap;
   private FSClusterStats stats;
-  static final String enableDebugLogging = "For more information, please enable"
-    + " DEBUG level logging on the "
-    + "org.apache.hadoop.hdfs.server.namenode.FSNamesystem logger.";
+  private long heartbeatInterval;   // interval for DataNode heartbeats
+  /**
+   * A miss of this many heartbeat intervals is tolerated by the replica deletion policy.
+   */
+  private int tolerateHeartbeatMultiplier;
 
   BlockPlacementPolicyDefault(Configuration conf,  FSClusterStats stats,
                            NetworkTopology clusterMap) {
@@ -71,6 +80,12 @@
     this.considerLoad = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
     this.stats = stats;
     this.clusterMap = clusterMap;
+    this.heartbeatInterval = conf.getLong(
+        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000;
+    this.tolerateHeartbeatMultiplier = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY,
+        DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT);
   }
 
   private ThreadLocal<StringBuilder> threadLocalBuilder =
@@ -551,24 +566,33 @@
                                                  short replicationFactor,
                                                  Collection<DatanodeDescriptor> first, 
                                                  Collection<DatanodeDescriptor> second) {
+    long oldestHeartbeat =
+      now() - heartbeatInterval * tolerateHeartbeatMultiplier;
+    DatanodeDescriptor oldestHeartbeatNode = null;
     long minSpace = Long.MAX_VALUE;
-    DatanodeDescriptor cur = null;
+    DatanodeDescriptor minSpaceNode = null;
 
     // pick replica from the first Set. If first is empty, then pick replicas
     // from second set.
     Iterator<DatanodeDescriptor> iter =
           first.isEmpty() ? second.iterator() : first.iterator();
 
-    // pick node with least free space
+    // Pick the node with the oldest heartbeat or with the least free space,
+    // if all heartbeats are within the tolerable heartbeat interval
     while (iter.hasNext() ) {
       DatanodeDescriptor node = iter.next();
       long free = node.getRemaining();
+      long lastHeartbeat = node.getLastUpdate();
+      if(lastHeartbeat < oldestHeartbeat) {
+        oldestHeartbeat = lastHeartbeat;
+        oldestHeartbeatNode = node;
+      }
       if (minSpace > free) {
         minSpace = free;
-        cur = node;
+        minSpaceNode = node;
       }
     }
-    return cur;
+    return oldestHeartbeatNode != null ? oldestHeartbeatNode : minSpaceNode;
   }
   
   @VisibleForTesting
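A rough standalone sketch of the selection rule this hunk introduces, using illustrative Node/choose names rather than the real DatanodeDescriptor API: a replica on a node whose heartbeat is older than heartbeatInterval * tolerateHeartbeatMultiplier is chosen for deletion first; only when every heartbeat is recent does the least-free-space rule apply.

import java.util.List;

class ReplicaDeletionChoiceSketch {
  static final class Node {
    final String name; final long remaining; final long lastUpdate;
    Node(String name, long remaining, long lastUpdate) {
      this.name = name; this.remaining = remaining; this.lastUpdate = lastUpdate;
    }
  }

  static Node choose(List<Node> candidates, long heartbeatIntervalMs, int tolerateMultiplier) {
    long oldestHeartbeat = System.currentTimeMillis() - heartbeatIntervalMs * tolerateMultiplier;
    Node oldestHeartbeatNode = null;
    long minSpace = Long.MAX_VALUE;
    Node minSpaceNode = null;
    for (Node node : candidates) {
      if (node.lastUpdate < oldestHeartbeat) {   // stale heartbeat wins outright
        oldestHeartbeat = node.lastUpdate;
        oldestHeartbeatNode = node;
      }
      if (node.remaining < minSpace) {           // fallback: least free space
        minSpace = node.remaining;
        minSpaceNode = node;
      }
    }
    return oldestHeartbeatNode != null ? oldestHeartbeatNode : minSpaceNode;
  }
}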
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 7f795cd..2aee0eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -100,11 +100,7 @@
    * with the same storage id; and </li>
    * <li>removed if and only if an existing datanode is restarted to serve a
    * different storage id.</li>
-   * </ul> <br>
-   * The list of the {@link DatanodeDescriptor}s in the map is checkpointed
-   * in the namespace image file. Only the {@link DatanodeInfo} part is 
-   * persistent, the list of blocks is restored from the datanode block
-   * reports. 
+   * </ul> <br> 
    * <p>
    * Mapping: StorageID -> DatanodeDescriptor
    */
@@ -832,7 +828,9 @@
 
     if (InetAddresses.isInetAddress(hostStr)) {
       // The IP:port is sufficient for listing in a report
-      dnId = new DatanodeID(hostStr, "", port);
+      dnId = new DatanodeID(hostStr, "", "", port,
+          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
     } else {
       String ipAddr = "";
       try {
@@ -840,7 +838,9 @@
       } catch (UnknownHostException e) {
         LOG.warn("Invalid hostname " + hostStr + " in hosts file");
       }
-      dnId = new DatanodeID(ipAddr, hostStr, port);
+      dnId = new DatanodeID(ipAddr, hostStr, "", port,
+          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
     }
     return dnId;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index d4c0f1c..588d8df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -19,7 +19,6 @@
 
 import java.io.PrintWriter;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
@@ -54,10 +53,23 @@
     return numBlocks;
   }
 
-  /** Does this contain the block which is associated with the storage? */
+  /**
+   * @return true if the given storage has the given block listed for
+   * invalidation. Blocks are compared including their generation stamps:
+   * if a block is pending invalidation but with a different generation stamp,
+   * returns false.
+   * @param storageID the storage to check
+   * @param block the block to look for
+   * 
+   */
   synchronized boolean contains(final String storageID, final Block block) {
-    final Collection<Block> s = node2blocks.get(storageID);
-    return s != null && s.contains(block);
+    final LightWeightHashSet<Block> s = node2blocks.get(storageID);
+    if (s == null) {
+      return false; // no invalidate blocks for this storage ID
+    }
+    Block blockInSet = s.getElement(block);
+    return blockInSet != null &&
+        block.getGenerationStamp() == blockInSet.getGenerationStamp();
   }
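A minimal, hypothetical sketch of the behaviour the new contains() javadoc describes, assuming a toy Block type (the real LightWeightHashSet and Block classes are not reproduced here): the lookup matches on block identity and the generation stamp is then compared explicitly, so a stale replica with an older stamp is not treated as pending invalidation.

import java.util.HashMap;
import java.util.Map;

class GenStampCheckSketch {
  static final class Block {
    final long id;
    final long genStamp;
    Block(long id, long genStamp) { this.id = id; this.genStamp = genStamp; }
  }

  // stand-in for node2blocks.get(storageID): blocks pending invalidation, keyed by id
  private final Map<Long, Block> pending = new HashMap<>();

  boolean contains(Block block) {
    Block stored = pending.get(block.id);          // analogous to s.getElement(block)
    return stored != null
        && stored.genStamp == block.genStamp;      // reject differing generation stamps
  }

  public static void main(String[] args) {
    GenStampCheckSketch s = new GenStampCheckSketch();
    s.pending.put(42L, new Block(42, 1001));
    System.out.println(s.contains(new Block(42, 1001))); // true
    System.out.println(s.contains(new Block(42, 1000))); // false: stale generation stamp
  }
}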
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java
index 2b5b3e4..41975d33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java
@@ -19,26 +19,20 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.fs.ContentSummary;
-
 /** 
  * This interface is used by the block manager to expose a
  * few characteristics of a collection of Block/BlockUnderConstruction.
  */
 public interface MutableBlockCollection extends BlockCollection {
   /**
-   * Set block 
+   * Set the block at the given index.
    */
-  public void setBlock(int idx, BlockInfo blk);
+  public void setBlock(int index, BlockInfo blk);
 
   /**
-   * Convert the last block of the collection to an under-construction block.
-   * Set its locations.
+   * Convert the last block of the collection to an under-construction block
+   * and set the locations.
    */
   public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
-                       DatanodeDescriptor[] targets) throws IOException;
+      DatanodeDescriptor[] locations) throws IOException;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index 5a13a61..d75a267 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -71,10 +71,12 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.VersionInfo;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
+
 @InterfaceAudience.Private
 public class JspHelper {
   public static final String CURRENT_CONF = "current.conf";
-  final static public String WEB_UGI_PROPERTY_NAME = DFSConfigKeys.DFS_WEB_UGI_KEY;
   public static final String DELEGATION_PARAMETER_NAME = DelegationParam.NAME;
   public static final String NAMENODE_ADDRESS = "nnaddr";
   static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
@@ -438,9 +440,9 @@
 
   /** Return a table containing version information. */
   public static String getVersionTable() {
-    return "<div id='dfstable'><table>"       
-        + "\n  <tr><td id='col1'>Version:</td><td>" + VersionInfo.getVersion() + ", " + VersionInfo.getRevision()
-        + "\n  <tr><td id='col1'>Compiled:</td><td>" + VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch()
+    return "<div class='dfstable'><table>"       
+        + "\n  <tr><td class='col1'>Version:</td><td>" + VersionInfo.getVersion() + ", " + VersionInfo.getRevision() + "</td></tr>"
+        + "\n  <tr><td class='col1'>Compiled:</td><td>" + VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch() + "</td></tr>"
         + "\n</table></div>";
   }
 
@@ -483,11 +485,12 @@
    */
   public static UserGroupInformation getDefaultWebUser(Configuration conf
                                                        ) throws IOException {
-    String[] strings = conf.getStrings(JspHelper.WEB_UGI_PROPERTY_NAME);
-    if (strings == null || strings.length == 0) {
+    String user = conf.get(
+        HADOOP_HTTP_STATIC_USER, DEFAULT_HADOOP_HTTP_STATIC_USER);
+    if (user == null || user.length() == 0) {
       throw new IOException("Cannot determine UGI from request or conf");
     }
-    return UserGroupInformation.createRemoteUser(strings[0]);
+    return UserGroupInformation.createRemoteUser(user);
   }
 
   private static InetSocketAddress getNNServiceAddress(ServletContext context,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 02010d3..9f0c088 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -591,7 +591,8 @@
       processDistributedUpgradeCommand((UpgradeCommand)cmd);
       break;
     case DatanodeProtocol.DNA_RECOVERBLOCK:
-      dn.recoverBlocks(((BlockRecoveryCommand)cmd).getRecoveringBlocks());
+      String who = "NameNode at " + actor.getNNSocketAddress();
+      dn.recoverBlocks(who, ((BlockRecoveryCommand)cmd).getRecoveringBlocks());
       break;
     case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
       LOG.info("DatanodeCommand action: DNA_ACCESSKEYUPDATE");
@@ -608,6 +609,9 @@
       if (bandwidth > 0) {
         DataXceiverServer dxcs =
                      (DataXceiverServer) dn.dataXceiverServer.getRunnable();
+        LOG.info("Updating balance throttler bandwidth from "
+            + dxcs.balanceThrottler.getBandwidth() + " bytes/s "
+            + "to: " + bandwidth + " bytes/s.");
         dxcs.balanceThrottler.setBandwidth(bandwidth);
       }
       break;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
index 3355ee2..2438c3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
@@ -145,7 +145,7 @@
   void refreshNamenodes(Configuration conf)
       throws IOException {
     LOG.info("Refresh request received for nameservices: "
-        + conf.get(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES));
+        + conf.get(DFSConfigKeys.DFS_NAMESERVICES));
     
     Map<String, Map<String, InetSocketAddress>> newAddressMap = 
       DFSUtil.getNNServiceRpcAddresses(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 72591e0..f0f7c78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -42,6 +42,7 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -110,6 +111,8 @@
   private final BlockConstructionStage stage;
   private final boolean isTransfer;
 
+  private boolean syncOnClose;
+
   BlockReceiver(final ExtendedBlock block, final DataInputStream in,
       final String inAddr, final String myAddr,
       final BlockConstructionStage stage, 
@@ -245,14 +248,18 @@
    * close files.
    */
   public void close() throws IOException {
-
     IOException ioe = null;
+    if (syncOnClose && (out != null || checksumOut != null)) {
+      datanode.metrics.incrFsyncCount();      
+    }
     // close checksum file
     try {
       if (checksumOut != null) {
         checksumOut.flush();
-        if (datanode.getDnConf().syncOnClose && (cout instanceof FileOutputStream)) {
+        if (syncOnClose && (cout instanceof FileOutputStream)) {
+          long start = Util.now();
           ((FileOutputStream)cout).getChannel().force(true);
+          datanode.metrics.addFsync(Util.now() - start);
         }
         checksumOut.close();
         checksumOut = null;
@@ -267,8 +274,10 @@
     try {
       if (out != null) {
         out.flush();
-        if (datanode.getDnConf().syncOnClose && (out instanceof FileOutputStream)) {
+        if (syncOnClose && (out instanceof FileOutputStream)) {
+          long start = Util.now();
           ((FileOutputStream)out).getChannel().force(true);
+          datanode.metrics.addFsync(Util.now() - start);
         }
         out.close();
         out = null;
@@ -290,12 +299,25 @@
    * Flush block data and metadata files to disk.
    * @throws IOException
    */
-  void flush() throws IOException {
+  void flushOrSync(boolean isSync) throws IOException {
+    if (isSync && (out != null || checksumOut != null)) {
+      datanode.metrics.incrFsyncCount();      
+    }
     if (checksumOut != null) {
       checksumOut.flush();
+      if (isSync && (cout instanceof FileOutputStream)) {
+        long start = Util.now();
+        ((FileOutputStream)cout).getChannel().force(true);
+        datanode.metrics.addFsync(Util.now() - start);
+      }
     }
     if (out != null) {
       out.flush();
+      if (isSync && (out instanceof FileOutputStream)) {
+        long start = Util.now();
+        ((FileOutputStream)out).getChannel().force(true);
+        datanode.metrics.addFsync(Util.now() - start);
+      }
     }
   }
 
@@ -533,7 +555,9 @@
       header.getOffsetInBlock(),
       header.getSeqno(),
       header.isLastPacketInBlock(),
-      header.getDataLen(), endOfHeader);
+      header.getDataLen(),
+      header.getSyncBlock(),
+      endOfHeader);
   }
 
   /**
@@ -549,15 +573,19 @@
    * returns the number of data bytes that the packet has.
    */
   private int receivePacket(long offsetInBlock, long seqno,
-      boolean lastPacketInBlock, int len, int endOfHeader) throws IOException {
+      boolean lastPacketInBlock, int len, boolean syncBlock,
+      int endOfHeader) throws IOException {
     if (LOG.isDebugEnabled()){
       LOG.debug("Receiving one packet for block " + block +
                 " of length " + len +
                 " seqno " + seqno +
                 " offsetInBlock " + offsetInBlock +
+                " syncBlock " + syncBlock +
                 " lastPacketInBlock " + lastPacketInBlock);
     }
-    
+    // make sure the block gets sync'ed upon close
+    this.syncOnClose |= syncBlock && lastPacketInBlock;
+
     // update received bytes
     long firstByteInBlock = offsetInBlock;
     offsetInBlock += len;
@@ -587,6 +615,10 @@
       if(LOG.isDebugEnabled()) {
         LOG.debug("Receiving an empty packet or the end of the block " + block);
       }
+      // flush unless close() would flush anyway
+      if (syncBlock && !lastPacketInBlock) {
+        flushOrSync(true);
+      }
     } else {
       int checksumLen = ((len + bytesPerChecksum - 1)/bytesPerChecksum)*
                                                             checksumSize;
@@ -677,8 +709,8 @@
             );
             checksumOut.write(pktBuf, checksumOff, checksumLen);
           }
-          /// flush entire packet
-          flush();
+          /// flush entire packet, sync unless close() will sync
+          flushOrSync(syncBlock && !lastPacketInBlock);
           
           replicaInfo.setLastChecksumAndDataLen(
             offsetInBlock, lastChunkChecksum
@@ -730,6 +762,7 @@
       String mirrAddr, DataTransferThrottler throttlerArg,
       DatanodeInfo[] downstreams) throws IOException {
 
+      syncOnClose = datanode.getDnConf().syncOnClose;
       boolean responderClosed = false;
       mirrorOut = mirrOut;
       mirrorAddr = mirrAddr;
@@ -768,7 +801,7 @@
           datanode.data.convertTemporaryToRbw(block);
         } else {
           // for isDatnode or TRANSFER_FINALIZED
-          // Finalize the block. Does this fsync()?
+          // Finalize the block.
           datanode.data.finalizeBlock(block);
         }
         datanode.metrics.incrBlocksWritten();
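A small sketch (illustrative names, no real I/O) of the per-packet decision the BlockReceiver changes above implement: the syncBlock flag from the packet header triggers an immediate fsync via flushOrSync(true), except on the last packet, where the sync is deferred to close() through syncOnClose.

class FlushOrSyncDecisionSketch {
  private boolean syncOnClose;

  void onPacket(boolean syncBlock, boolean lastPacketInBlock) {
    // remember that the block must be fsync'ed when it is closed
    syncOnClose |= syncBlock && lastPacketInBlock;
    // fsync now only if requested and close() would not already do it
    boolean syncNow = syncBlock && !lastPacketInBlock;
    flushOrSync(syncNow);
  }

  private void flushOrSync(boolean isSync) {
    // flush buffered data; when isSync is true, also force it to disk
    System.out.println(isSync ? "flush + fsync" : "flush only");
  }
}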
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 6a830db..12ee56e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -701,8 +701,9 @@
    */
   private void writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) {
     pkt.clear();
+    // both syncBlock and syncPacket are false
     PacketHeader header = new PacketHeader(packetLen, offset, seqno,
-        (dataLen == 0), dataLen);
+        (dataLen == 0), dataLen, false);
     header.putInBuffer(pkt);
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index fdcfa56..9137c92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -163,6 +163,7 @@
 import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
@@ -667,23 +668,16 @@
    * @param nsInfo the namespace info from the first part of the NN handshake
    */
   DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
-    final String xferIp = streamingAddr.getAddress().getHostAddress();
-    DatanodeRegistration bpRegistration = new DatanodeRegistration(xferIp, getXferPort());
-    bpRegistration.setInfoPort(getInfoPort());
-    bpRegistration.setIpcPort(getIpcPort());
-    bpRegistration.setHostName(hostName);
-    bpRegistration.setStorageID(getStorageId());
-    bpRegistration.setSoftwareVersion(VersionInfo.getVersion());
-
     StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
     if (storageInfo == null) {
       // it's null in the case of SimulatedDataSet
-      bpRegistration.getStorageInfo().layoutVersion = HdfsConstants.LAYOUT_VERSION;
-      bpRegistration.setStorageInfo(nsInfo);
-    } else {
-      bpRegistration.setStorageInfo(storageInfo);
+      storageInfo = new StorageInfo(nsInfo);
     }
-    return bpRegistration;
+    DatanodeID dnId = new DatanodeID(
+        streamingAddr.getAddress().getHostAddress(), hostName, 
+        getStorageId(), getXferPort(), getInfoPort(), getIpcPort());
+    return new DatanodeRegistration(dnId, storageInfo, 
+        new ExportedBlockKeys(), VersionInfo.getVersion());
   }
 
   /**
@@ -1713,13 +1707,16 @@
     secureMain(args, null);
   }
 
-  public Daemon recoverBlocks(final Collection<RecoveringBlock> blocks) {
+  public Daemon recoverBlocks(
+      final String who,
+      final Collection<RecoveringBlock> blocks) {
+    
     Daemon d = new Daemon(threadGroup, new Runnable() {
       /** Recover a list of blocks. It is run by the primary datanode. */
       public void run() {
         for(RecoveringBlock b : blocks) {
           try {
-            logRecoverBlock("NameNode", b.getBlock(), b.getLocations());
+            logRecoverBlock(who, b);
             recoverBlock(b);
           } catch (IOException e) {
             LOG.warn("recoverBlocks FAILED: " + b, e);
@@ -1980,14 +1977,13 @@
         datanodes, storages);
   }
   
-  private static void logRecoverBlock(String who,
-      ExtendedBlock block, DatanodeID[] targets) {
-    StringBuilder msg = new StringBuilder(targets[0].toString());
-    for (int i = 1; i < targets.length; i++) {
-      msg.append(", " + targets[i]);
-    }
+  private static void logRecoverBlock(String who, RecoveringBlock rb) {
+    ExtendedBlock block = rb.getBlock();
+    DatanodeInfo[] targets = rb.getLocations();
+    
     LOG.info(who + " calls recoverBlock(block=" + block
-        + ", targets=[" + msg + "])");
+        + ", targets=[" + Joiner.on(", ").join(targets) + "]"
+        + ", newGenerationStamp=" + rb.getNewGenerationStamp() + ")");
   }
 
   @Override // ClientDataNodeProtocol
@@ -2032,6 +2028,18 @@
 
     //get replica information
     synchronized(data) {
+      Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
+          b.getBlockId());
+      if (null == storedBlock) {
+        throw new IOException(b + " not found in datanode.");
+      }
+      storedGS = storedBlock.getGenerationStamp();
+      if (storedGS < b.getGenerationStamp()) {
+        throw new IOException(storedGS
+            + " = storedGS < b.getGenerationStamp(), b=" + b);
+      }
+      // Update the genstamp with storedGS
+      b.setGenerationStamp(storedGS);
       if (data.isValidRbw(b)) {
         stage = BlockConstructionStage.TRANSFER_RBW;
       } else if (data.isValidBlock(b)) {
@@ -2040,18 +2048,9 @@
         final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
         throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
       }
-
-      storedGS = data.getStoredBlock(b.getBlockPoolId(),
-          b.getBlockId()).getGenerationStamp();
-      if (storedGS < b.getGenerationStamp()) {
-        throw new IOException(
-            storedGS + " = storedGS < b.getGenerationStamp(), b=" + b);        
-      }
       visible = data.getReplicaVisibleLength(b);
     }
-
-    //set storedGS and visible length
-    b.setGenerationStamp(storedGS);
+    //set visible length
     b.setNumBytes(visible);
 
     if (targets.length > 0) {
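For reference, a tiny usage sketch of the Guava Joiner call that replaces the hand-rolled StringBuilder loop in logRecoverBlock; the target strings here are made up.

import com.google.common.base.Joiner;

class JoinerUsageSketch {
  public static void main(String[] args) {
    String[] targets = {"dn1:50010", "dn2:50010", "dn3:50010"};  // illustrative values
    System.out.println("targets=[" + Joiner.on(", ").join(targets) + "]");
    // prints: targets=[dn1:50010, dn2:50010, dn3:50010]
  }
}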
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
index 62a2f53..d8ff408 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
@@ -21,6 +21,7 @@
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.URL;
 import java.net.URLEncoder;
 import java.security.PrivilegedExceptionAction;
 import java.text.SimpleDateFormat;
@@ -616,9 +617,12 @@
                                         Configuration conf
                                         ) throws IOException,
                                                  InterruptedException {
-    final String referrer = JspHelper.validateURL(req.getParameter("referrer"));
+    String referrer = null;
     boolean noLink = false;
-    if (referrer == null) {
+    try {
+      referrer = new URL(req.getParameter("referrer")).toString();
+    } catch (IOException e) {
+      referrer = null;
       noLink = true;
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
index f7da29b..2d1ff64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.mortbay.jetty.nio.SelectChannelConnector;
 
 /**
@@ -60,10 +61,7 @@
   @Override
   public void init(DaemonContext context) throws Exception {
     System.err.println("Initializing secure datanode resources");
-    // We should only start up a secure datanode in a Kerberos-secured cluster
-    Configuration conf = new Configuration(); // Skip UGI method to not log in
-    if(!conf.get(HADOOP_SECURITY_AUTHENTICATION).equals("kerberos"))
-      throw new RuntimeException("Cannot start secure datanode in unsecure cluster");
+    Configuration conf = new Configuration();
     
     // Stash command-line arguments for regular datanode
     args = context.getArguments();
@@ -98,7 +96,8 @@
     System.err.println("Successfully obtained privileged resources (streaming port = "
         + ss + " ) (http listener port = " + listener.getConnection() +")");
     
-    if (ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) {
+    if ((ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) &&
+        UserGroupInformation.isSecurityEnabled()) {
       throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
     }
     System.err.println("Opened streaming server at " + streamingAddr);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index 9e18007..a849cda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -61,6 +61,8 @@
   @Metric MutableCounterLong writesFromLocalClient;
   @Metric MutableCounterLong writesFromRemoteClient;
   @Metric MutableCounterLong blocksGetLocalPathInfo;
+
+  @Metric MutableCounterLong fsyncCount;
   
   @Metric MutableCounterLong volumeFailures;
 
@@ -72,6 +74,8 @@
   @Metric MutableRate heartbeats;
   @Metric MutableRate blockReports;
 
+  @Metric MutableRate fsync;
+
   final MetricsRegistry registry = new MetricsRegistry("datanode");
   final String name;
 
@@ -151,6 +155,14 @@
     blocksRead.incr();
   }
 
+  public void incrFsyncCount() {
+    fsyncCount.incr();
+  }
+
+  public void addFsync(long latency) {
+    fsync.add(latency);
+  }
+
   public void shutdown() {
     DefaultMetricsSystem.shutdown();
   }
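A hedged sketch of how the two new metrics are meant to be driven from the BlockReceiver paths shown earlier, with plain long fields standing in for the MutableCounterLong/MutableRate types: one count per fsync'ed flush or close, plus the measured latency of each FileChannel.force() call.

import java.io.FileOutputStream;
import java.io.IOException;

class FsyncMetricsSketch {
  long fsyncCount;        // stand-in for the fsyncCount counter
  long fsyncLatencyMs;    // stand-in for the fsync rate metric

  void syncAndRecord(FileOutputStream out) throws IOException {
    fsyncCount++;                                           // incrFsyncCount()
    long start = System.currentTimeMillis();                // Util.now() in the patch
    out.getChannel().force(true);                           // push data and metadata to disk
    fsyncLatencyMs += System.currentTimeMillis() - start;   // addFsync(latency)
  }
}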
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
index a9aa20d..51e2728 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
@@ -207,7 +207,7 @@
       int logVersion = storage.getLayoutVersion();
       backupInputStream.setBytes(data, logVersion);
 
-      long numTxnsAdvanced = logLoader.loadEditRecords(logVersion, 
+      long numTxnsAdvanced = logLoader.loadEditRecords(
           backupInputStream, true, lastAppliedTxId + 1, null);
       if (numTxnsAdvanced != numTxns) {
         throw new IOException("Batch of txns starting at txnid " +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
index ebf4f48..2f6fe8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -60,19 +61,10 @@
   }
 
   @Override
-  public long getNumberOfTransactions(long fromTxnId, boolean inProgressOk)
-      throws IOException, CorruptionException {
+  public void selectInputStreams(Collection<EditLogInputStream> streams,
+      long fromTxnId, boolean inProgressOk) {
     // This JournalManager is never used for input. Therefore it cannot
     // return any transactions
-    return 0;
-  }
-  
-  @Override
-  public EditLogInputStream getInputStream(long fromTxnId, boolean inProgressOk)
-      throws IOException {
-    // This JournalManager is never used for input. Therefore it cannot
-    // return any transactions
-    throw new IOException("Unsupported operation");
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
index 1f514cdf..b3c45ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
@@ -119,7 +119,7 @@
 
     this.version = version;
 
-    reader = new FSEditLogOp.Reader(in, version);
+    reader = new FSEditLogOp.Reader(in, tracker, version);
   }
 
   void clear() throws IOException {
@@ -129,12 +129,12 @@
   }
 
   @Override
-  public long getFirstTxId() throws IOException {
+  public long getFirstTxId() {
     return HdfsConstants.INVALID_TXID;
   }
 
   @Override
-  public long getLastTxId() throws IOException {
+  public long getLastTxId() {
     return HdfsConstants.INVALID_TXID;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
index 0b00187..e6ddf5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
@@ -24,10 +24,14 @@
 import java.io.BufferedInputStream;
 import java.io.EOFException;
 import java.io.DataInputStream;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.io.IOUtils;
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
 
 /**
  * An implementation of the abstract class {@link EditLogInputStream}, which
@@ -35,13 +39,21 @@
  */
 public class EditLogFileInputStream extends EditLogInputStream {
   private final File file;
-  private final FileInputStream fStream;
-  final private long firstTxId;
-  final private long lastTxId;
-  private final int logVersion;
-  private final FSEditLogOp.Reader reader;
-  private final FSEditLogLoader.PositionTrackingInputStream tracker;
+  private final long firstTxId;
+  private final long lastTxId;
   private final boolean isInProgress;
+  static private enum State {
+    UNINIT,
+    OPEN,
+    CLOSED
+  }
+  private State state = State.UNINIT;
+  private FileInputStream fStream = null;
+  private int logVersion = 0;
+  private FSEditLogOp.Reader reader = null;
+  private FSEditLogLoader.PositionTrackingInputStream tracker = null;
+  private DataInputStream dataIn = null;
+  static final Log LOG = LogFactory.getLog(EditLogInputStream.class);
   
   /**
    * Open an EditLogInputStream for the given file.
@@ -68,34 +80,43 @@
    *         header
    */
   public EditLogFileInputStream(File name, long firstTxId, long lastTxId,
-      boolean isInProgress)
-      throws LogHeaderCorruptException, IOException {
-    file = name;
-    fStream = new FileInputStream(name);
-
-    BufferedInputStream bin = new BufferedInputStream(fStream);
-    tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);
-    DataInputStream in = new DataInputStream(tracker);
-
-    try {
-      logVersion = readLogVersion(in);
-    } catch (EOFException eofe) {
-      throw new LogHeaderCorruptException("No header found in log");
-    }
-
-    reader = new FSEditLogOp.Reader(in, logVersion);
+      boolean isInProgress) {
+    this.file = name;
     this.firstTxId = firstTxId;
     this.lastTxId = lastTxId;
     this.isInProgress = isInProgress;
   }
 
+  private void init() throws LogHeaderCorruptException, IOException {
+    Preconditions.checkState(state == State.UNINIT);
+    BufferedInputStream bin = null;
+    try {
+      fStream = new FileInputStream(file);
+      bin = new BufferedInputStream(fStream);
+      tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);
+      dataIn = new DataInputStream(tracker);
+      try {
+        logVersion = readLogVersion(dataIn);
+      } catch (EOFException eofe) {
+        throw new LogHeaderCorruptException("No header found in log");
+      }
+      reader = new FSEditLogOp.Reader(dataIn, tracker, logVersion);
+      state = State.OPEN;
+    } finally {
+      if (reader == null) {
+        IOUtils.cleanup(LOG, dataIn, tracker, bin, fStream);
+        state = State.CLOSED;
+      }
+    }
+  }
+
   @Override
-  public long getFirstTxId() throws IOException {
+  public long getFirstTxId() {
     return firstTxId;
   }
   
   @Override
-  public long getLastTxId() throws IOException {
+  public long getLastTxId() {
     return lastTxId;
   }
 
@@ -104,33 +125,95 @@
     return file.getPath();
   }
 
+  private FSEditLogOp nextOpImpl(boolean skipBrokenEdits) throws IOException {
+    FSEditLogOp op = null;
+    switch (state) {
+    case UNINIT:
+      try {
+        init();
+      } catch (Throwable e) {
+        LOG.error("caught exception initializing " + this, e);
+        if (skipBrokenEdits) {
+          return null;
+        }
+        Throwables.propagateIfPossible(e, IOException.class);
+      }
+      Preconditions.checkState(state != State.UNINIT);
+      return nextOpImpl(skipBrokenEdits);
+    case OPEN:
+      op = reader.readOp(skipBrokenEdits);
+      if ((op != null) && (op.hasTransactionId())) {
+        long txId = op.getTransactionId();
+        if ((txId >= lastTxId) &&
+            (lastTxId != HdfsConstants.INVALID_TXID)) {
+          //
+          // Sometimes, the NameNode crashes while it's writing to the
+          // edit log.  In that case, you can end up with an unfinalized edit log
+          // which has some garbage at the end.
+          // JournalManager#recoverUnfinalizedSegments will finalize these
+          // unfinished edit logs, giving them a defined final transaction 
+          // ID.  Then they will be renamed, so that any subsequent
+          // readers will have this information.
+          //
+          // Since there may be garbage at the end of these "cleaned up"
+          // logs, we want to be sure to skip it here if we've read everything
+          // we were supposed to read out of the stream.
+          // So we force an EOF on all subsequent reads.
+          //
+          long skipAmt = file.length() - tracker.getPos();
+          if (skipAmt > 0) {
+            LOG.warn("skipping " + skipAmt + " bytes at the end " +
+              "of edit log  '" + getName() + "': reached txid " + txId +
+              " out of " + lastTxId);
+            tracker.skip(skipAmt);
+          }
+        }
+      }
+      break;
+      case CLOSED:
+        break; // return null
+    }
+    return op;
+  }
+
   @Override
   protected FSEditLogOp nextOp() throws IOException {
-    return reader.readOp(false);
+    return nextOpImpl(false);
   }
-  
+
   @Override
   protected FSEditLogOp nextValidOp() {
     try {
-      return reader.readOp(true);
-    } catch (IOException e) {
+      return nextOpImpl(true);
+    } catch (Throwable e) {
+      LOG.error("nextValidOp: got exception while reading " + this, e);
       return null;
     }
   }
 
   @Override
   public int getVersion() throws IOException {
+    if (state == State.UNINIT) {
+      init();
+    }
     return logVersion;
   }
 
   @Override
   public long getPosition() {
-    return tracker.getPos();
+    if (state == State.OPEN) {
+      return tracker.getPos();
+    } else {
+      return 0;
+    }
   }
 
   @Override
   public void close() throws IOException {
-    fStream.close();
+    if (state == State.OPEN) {
+      dataIn.close();
+    }
+    state = State.CLOSED;
   }
 
   @Override
@@ -153,12 +236,12 @@
     EditLogFileInputStream in;
     try {
       in = new EditLogFileInputStream(file);
-    } catch (LogHeaderCorruptException corrupt) {
+      in.getVersion(); // causes us to read the header
+    } catch (LogHeaderCorruptException e) {
       // If the header is malformed or the wrong value, this indicates a corruption
-      FSImage.LOG.warn("Log at " + file + " has no valid header",
-          corrupt);
+      LOG.warn("Log file " + file + " has no valid header", e);
       return new FSEditLogLoader.EditLogValidation(0,
-          HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, true);
+          HdfsConstants.INVALID_TXID, true);
     }
     
     try {
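A minimal sketch, with invented names, of the UNINIT/OPEN/CLOSED pattern this rewrite gives EditLogFileInputStream: the constructor only records the file, all opening work happens lazily on first use, and close() is safe in any state.

import java.io.FileInputStream;
import java.io.IOException;

class LazyOpenStreamSketch {
  private enum State { UNINIT, OPEN, CLOSED }

  private final String path;
  private State state = State.UNINIT;
  private FileInputStream in;

  LazyOpenStreamSketch(String path) { this.path = path; }  // never throws

  int read() throws IOException {
    switch (state) {
    case UNINIT:
      in = new FileInputStream(path);   // real work deferred until first use
      state = State.OPEN;
      return read();
    case OPEN:
      return in.read();
    default:                            // CLOSED
      return -1;
    }
  }

  void close() throws IOException {
    if (state == State.OPEN) {
      in.close();
    }
    state = State.CLOSED;
  }
}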
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
index b07893fa..dd8102e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
@@ -41,12 +41,13 @@
 @InterfaceAudience.Private
 public class EditLogFileOutputStream extends EditLogOutputStream {
   private static Log LOG = LogFactory.getLog(EditLogFileOutputStream.class);
+  public static final int PREALLOCATION_LENGTH = 1024 * 1024;
 
   private File file;
   private FileOutputStream fp; // file stream for storing edit logs
   private FileChannel fc; // channel of the file stream for sync
   private EditsDoubleBuffer doubleBuf;
-  static ByteBuffer fill = ByteBuffer.allocateDirect(1024 * 1024); // preallocation, 1MB
+  static ByteBuffer fill = ByteBuffer.allocateDirect(PREALLOCATION_LENGTH);
 
   static {
     fill.position(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
index c2b42be..f9b84c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
@@ -45,12 +45,12 @@
   /** 
    * @return the first transaction which will be found in this stream
    */
-  public abstract long getFirstTxId() throws IOException;
+  public abstract long getFirstTxId();
   
   /** 
    * @return the last transaction which will be found in this stream
    */
-  public abstract long getLastTxId() throws IOException;
+  public abstract long getLastTxId();
 
 
   /**
@@ -73,14 +73,14 @@
     }
     return nextOp();
   }
-
+  
   /** 
    * Position the stream so that a valid operation can be read from it with
    * readOp().
    * 
    * This method can be used to skip over corrupted sections of edit logs.
    */
-  public void resync() throws IOException {
+  public void resync() {
     if (cachedOp != null) {
       return;
     }
@@ -109,7 +109,7 @@
     // error recovery will want to override this.
     try {
       return nextOp();
-    } catch (IOException e) {
+    } catch (Throwable e) {
       return null;
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 65b0c0e..5ebe5b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -25,6 +25,7 @@
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Iterator;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -232,6 +233,10 @@
         DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT);
 
     journalSet = new JournalSet(minimumRedundantJournals);
+    // set runtime so we can test starting with a faulty or unavailable
+    // shared directory
+    this.journalSet.setRuntimeForTesting(runtime);
+
     for (URI u : dirs) {
       boolean required = FSNamesystem.getRequiredNamespaceEditsDirs(conf)
           .contains(u);
@@ -269,13 +274,14 @@
     long segmentTxId = getLastWrittenTxId() + 1;
     // Safety check: we should never start a segment if there are
     // newer txids readable.
-    EditLogInputStream s = journalSet.getInputStream(segmentTxId, true);
-    try {
-      Preconditions.checkState(s == null,
-          "Cannot start writing at txid %s when there is a stream " +
-          "available for read: %s", segmentTxId, s);
-    } finally {
-      IOUtils.closeStream(s);
+    List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
+    journalSet.selectInputStreams(streams, segmentTxId, true);
+    if (!streams.isEmpty()) {
+      String error = String.format("Cannot start writing at txid %s " +
+        "when there is a stream available for read: %s",
+        segmentTxId, streams.get(0));
+      IOUtils.cleanup(LOG, streams.toArray(new EditLogInputStream[0]));
+      throw new IllegalStateException(error);
     }
     
     startLogSegmentAndWriteHeaderTxn(segmentTxId);
@@ -895,7 +901,7 @@
    * Used only by unit tests.
    */
   @VisibleForTesting
-  synchronized void setRuntimeForTesting(Runtime runtime) {
+  synchronized public void setRuntimeForTesting(Runtime runtime) {
     this.runtime = runtime;
     this.journalSet.setRuntimeForTesting(runtime);
   }
@@ -1199,10 +1205,10 @@
       // All journals have failed, it is handled in logSync.
     }
   }
-  
-  Collection<EditLogInputStream> selectInputStreams(long fromTxId,
-      long toAtLeastTxId) throws IOException {
-    return selectInputStreams(fromTxId, toAtLeastTxId, true);
+
+  public Collection<EditLogInputStream> selectInputStreams(
+      long fromTxId, long toAtLeastTxId) throws IOException {
+    return selectInputStreams(fromTxId, toAtLeastTxId, null, true);
   }
 
   /**
@@ -1212,25 +1218,71 @@
    * @param toAtLeast the selected streams must contain this transaction
    * @param inProgessOk set to true if in-progress streams are OK
    */
-  public synchronized Collection<EditLogInputStream> selectInputStreams(long fromTxId,
-      long toAtLeastTxId, boolean inProgressOk) throws IOException {
+  public synchronized Collection<EditLogInputStream> selectInputStreams(
+      long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery,
+      boolean inProgressOk) throws IOException {
     List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
-    EditLogInputStream stream = journalSet.getInputStream(fromTxId, inProgressOk);
-    while (stream != null) {
-      streams.add(stream);
-      // We're now looking for a higher range, so reset the fromTxId
-      fromTxId = stream.getLastTxId() + 1;
-      stream = journalSet.getInputStream(fromTxId, inProgressOk);
+    journalSet.selectInputStreams(streams, fromTxId, inProgressOk);
+
+    try {
+      checkForGaps(streams, fromTxId, toAtLeastTxId, inProgressOk);
+    } catch (IOException e) {
+      if (recovery != null) {
+        // If recovery mode is enabled, continue loading even if we know we
+        // can't load up to toAtLeastTxId.
+        LOG.error(e);
+      } else {
+        closeAllStreams(streams);
+        throw e;
+      }
     }
-    
-    if (fromTxId <= toAtLeastTxId) {
-      closeAllStreams(streams);
-      throw new IOException(String.format("Gap in transactions. Expected to "
-          + "be able to read up until at least txid %d but unable to find any "
-          + "edit logs containing txid %d", toAtLeastTxId, fromTxId));
+    // This code will go away as soon as RedundantEditLogInputStream is
+    // introduced. (HDFS-3049)
+    try {
+      if (!streams.isEmpty()) {
+        streams.get(0).skipUntil(fromTxId);
+      }
+    } catch (IOException e) {
+      // We don't want to throw an exception from here, because that would make
+      // recovery impossible even if the user requested it.  An exception will
+      // be thrown later, when we don't read the starting txid we expect.
+      LOG.error("error skipping until transaction " + fromTxId, e);
     }
     return streams;
   }
+  
+  /**
+   * Check for gaps in the edit log input stream list.
+   * Note: we're assuming that the list is sorted and that txid ranges don't
+   * overlap.  This could be done better and with more generality with an
+   * interval tree.
+   */
+  private void checkForGaps(List<EditLogInputStream> streams, long fromTxId,
+      long toAtLeastTxId, boolean inProgressOk) throws IOException {
+    Iterator<EditLogInputStream> iter = streams.iterator();
+    long txId = fromTxId;
+    while (true) {
+      if (txId > toAtLeastTxId) return;
+      if (!iter.hasNext()) break;
+      EditLogInputStream elis = iter.next();
+      if (elis.getFirstTxId() > txId) break;
+      long next = elis.getLastTxId();
+      if (next == HdfsConstants.INVALID_TXID) {
+        if (!inProgressOk) {
+          throw new RuntimeException("inProgressOk = false, but " +
+              "selectInputStreams returned an in-progress edit " +
+              "log input stream (" + elis + ")");
+        }
+        // We don't know where the in-progress stream ends.
+        // It could certainly go all the way up to toAtLeastTxId.
+        return;
+      }
+      txId = next + 1;
+    }
+    throw new IOException(String.format("Gap in transactions. Expected to "
+        + "be able to read up until at least txid %d but unable to find any "
+        + "edit logs containing txid %d", toAtLeastTxId, txId));
+  }
 
   /** 
    * Close all the streams in a collection
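[Editor's note] The new selectInputStreams/checkForGaps pair above replaces the old getInputStream loop: JournalSet now hands back every candidate stream at once, and FSEditLog only has to verify that the sorted, non-overlapping txid ranges cover [fromTxId, toAtLeastTxId]. Below is a minimal, self-contained sketch of that contiguity check, with plain {firstTxId, lastTxId} pairs standing in for EditLogInputStream objects; the class and method names are illustrative only, not HDFS APIs.

    import java.util.Arrays;
    import java.util.List;

    class GapCheckSketch {
      // Stand-in for HdfsConstants.INVALID_TXID, marking an in-progress segment.
      static final long INVALID_TXID = -12345;

      /**
       * Returns true if the segments (each a {firstTxId, lastTxId} pair, sorted
       * and non-overlapping) cover every txid in [fromTxId, toAtLeastTxId].
       */
      static boolean covers(List<long[]> segments, long fromTxId,
          long toAtLeastTxId, boolean inProgressOk) {
        long txId = fromTxId;
        for (long[] seg : segments) {
          if (txId > toAtLeastTxId) {
            return true;                 // already able to read far enough
          }
          if (seg[0] > txId) {
            return false;                // hole before this segment starts
          }
          if (seg[1] == INVALID_TXID) {
            return inProgressOk;         // in-progress: end unknown, may suffice
          }
          txId = seg[1] + 1;             // resume just past this segment
        }
        return txId > toAtLeastTxId;
      }

      public static void main(String[] args) {
        List<long[]> segs = Arrays.asList(new long[]{1, 10}, new long[]{11, 20});
        System.out.println(covers(segs, 1, 20, false));  // true
        System.out.println(covers(segs, 1, 25, false));  // false: nothing past 20
      }
    }

In the patch, checkForGaps only throws; the policy decision sits in selectInputStreams, which logs the gap and keeps loading when a MetaRecoveryContext is supplied and fails fast otherwise.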
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 76c661d..e1b26bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -85,12 +85,10 @@
    */
   long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
       MetaRecoveryContext recovery) throws IOException {
-    int logVersion = edits.getVersion();
-
     fsNamesys.writeLock();
     try {
       long startTime = now();
-      long numEdits = loadEditRecords(logVersion, edits, false, 
+      long numEdits = loadEditRecords(edits, false, 
                                  expectedStartingTxId, recovery);
       FSImage.LOG.info("Edits file " + edits.getName() 
           + " of size " + edits.length() + " edits # " + numEdits 
@@ -102,7 +100,7 @@
     }
   }
 
-  long loadEditRecords(int logVersion, EditLogInputStream in, boolean closeOnExit,
+  long loadEditRecords(EditLogInputStream in, boolean closeOnExit,
                       long expectedStartingTxId, MetaRecoveryContext recovery)
       throws IOException {
     FSDirectory fsDir = fsNamesys.dir;
@@ -141,10 +139,10 @@
             }
           } catch (Throwable e) {
             // Handle a problem with our input
-            check203UpgradeFailure(logVersion, e);
+            check203UpgradeFailure(in.getVersion(), e);
             String errorMessage =
               formatEditLogReplayError(in, recentOpcodeOffsets, expectedTxId);
-            FSImage.LOG.error(errorMessage);
+            FSImage.LOG.error(errorMessage, e);
             if (recovery == null) {
                // We will only try to skip over problematic opcodes when in
                // recovery mode.
@@ -158,7 +156,7 @@
           }
           recentOpcodeOffsets[(int)(numEdits % recentOpcodeOffsets.length)] =
             in.getPosition();
-          if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
+          if (op.hasTransactionId()) {
             if (op.getTransactionId() > expectedTxId) { 
               MetaRecoveryContext.editLogLoaderPrompt("There appears " +
                   "to be a gap in the edit log.  We expected txid " +
@@ -175,7 +173,7 @@
             }
           }
           try {
-            applyEditLogOp(op, fsDir, logVersion);
+            applyEditLogOp(op, fsDir, in.getVersion());
           } catch (Throwable e) {
             LOG.error("Encountered exception on operation " + op, e);
             MetaRecoveryContext.editLogLoaderPrompt("Failed to " +
@@ -192,7 +190,7 @@
             expectedTxId = lastAppliedTxId = expectedStartingTxId;
           }
           // log progress
-          if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
+          if (op.hasTransactionId()) {
             long now = now();
             if (now - lastLogTime > REPLAY_TRANSACTION_LOG_INTERVAL) {
               int percent = Math.round((float)lastAppliedTxId / numTxns * 100);
@@ -647,112 +645,119 @@
   }
   
   /**
-   * Return the number of valid transactions in the stream. If the stream is
-   * truncated during the header, returns a value indicating that there are
-   * 0 valid transactions. This reads through the stream but does not close
-   * it.
+   * Find the last valid transaction ID in the stream.
+   * If there are invalid or corrupt transactions in the middle of the stream,
+   * validateEditLog will skip over them.
+   * This reads through the stream but does not close it.
+   *
    * @throws IOException if the stream cannot be read due to an IO error (eg
    *                     if the log does not exist)
    */
   static EditLogValidation validateEditLog(EditLogInputStream in) {
     long lastPos = 0;
-    long firstTxId = HdfsConstants.INVALID_TXID;
     long lastTxId = HdfsConstants.INVALID_TXID;
     long numValid = 0;
-    try {
-      FSEditLogOp op = null;
-      while (true) {
-        lastPos = in.getPosition();
+    FSEditLogOp op = null;
+    while (true) {
+      lastPos = in.getPosition();
+      try {
         if ((op = in.readOp()) == null) {
           break;
         }
-        if (firstTxId == HdfsConstants.INVALID_TXID) {
-          firstTxId = op.getTransactionId();
-        }
-        if (lastTxId == HdfsConstants.INVALID_TXID
-            || op.getTransactionId() == lastTxId + 1) {
-          lastTxId = op.getTransactionId();
-        } else {
-          FSImage.LOG.error("Out of order txid found. Found " +
-            op.getTransactionId() + ", expected " + (lastTxId + 1));
-          break;
-        }
-        numValid++;
+      } catch (Throwable t) {
+        FSImage.LOG.warn("Caught exception after reading " + numValid +
+            " ops from " + in + " while determining its valid length." +
+            "Position was " + lastPos, t);
+        break;
       }
-    } catch (Throwable t) {
-      // Catch Throwable and not just IOE, since bad edits may generate
-      // NumberFormatExceptions, AssertionErrors, OutOfMemoryErrors, etc.
-      FSImage.LOG.debug("Caught exception after reading " + numValid +
-          " ops from " + in + " while determining its valid length.", t);
+      if (lastTxId == HdfsConstants.INVALID_TXID
+          || op.getTransactionId() > lastTxId) {
+        lastTxId = op.getTransactionId();
+      }
+      numValid++;
     }
-    return new EditLogValidation(lastPos, firstTxId, lastTxId, false);
+    return new EditLogValidation(lastPos, lastTxId, false);
   }
-  
+
   static class EditLogValidation {
     private final long validLength;
-    private final long startTxId;
     private final long endTxId;
-    private final boolean corruptionDetected;
-     
-    EditLogValidation(long validLength, long startTxId, long endTxId,
-        boolean corruptionDetected) {
+    private final boolean hasCorruptHeader;
+
+    EditLogValidation(long validLength, long endTxId,
+        boolean hasCorruptHeader) {
       this.validLength = validLength;
-      this.startTxId = startTxId;
       this.endTxId = endTxId;
-      this.corruptionDetected = corruptionDetected;
+      this.hasCorruptHeader = hasCorruptHeader;
     }
-    
+
     long getValidLength() { return validLength; }
-    
-    long getStartTxId() { return startTxId; }
-    
+
     long getEndTxId() { return endTxId; }
-    
-    long getNumTransactions() { 
-      if (endTxId == HdfsConstants.INVALID_TXID
-          || startTxId == HdfsConstants.INVALID_TXID) {
-        return 0;
-      }
-      return (endTxId - startTxId) + 1;
-    }
-    
-    boolean hasCorruptHeader() { return corruptionDetected; }
+
+    boolean hasCorruptHeader() { return hasCorruptHeader; }
   }
 
   /**
    * Stream wrapper that keeps track of the current stream position.
+   * 
+   * This stream also allows us to set a limit on how many bytes we can read
+   * without getting an exception.
    */
-  public static class PositionTrackingInputStream extends FilterInputStream {
+  public static class PositionTrackingInputStream extends FilterInputStream
+      implements StreamLimiter {
     private long curPos = 0;
     private long markPos = -1;
+    private long limitPos = Long.MAX_VALUE;
 
     public PositionTrackingInputStream(InputStream is) {
       super(is);
     }
 
+    private void checkLimit(long amt) throws IOException {
+      long extra = (curPos + amt) - limitPos;
+      if (extra > 0) {
+        throw new IOException("Tried to read " + amt + " byte(s) past " +
+            "the limit at offset " + limitPos);
+      }
+    }
+    
+    @Override
     public int read() throws IOException {
+      checkLimit(1);
       int ret = super.read();
       if (ret != -1) curPos++;
       return ret;
     }
 
+    @Override
     public int read(byte[] data) throws IOException {
+      checkLimit(data.length);
       int ret = super.read(data);
       if (ret > 0) curPos += ret;
       return ret;
     }
 
+    @Override
     public int read(byte[] data, int offset, int length) throws IOException {
+      checkLimit(length);
       int ret = super.read(data, offset, length);
       if (ret > 0) curPos += ret;
       return ret;
     }
 
+    @Override
+    public void setLimit(long limit) {
+      limitPos = curPos + limit;
+    }
+
+    @Override
     public void mark(int limit) {
       super.mark(limit);
       markPos = curPos;
     }
 
+    @Override
     public void reset() throws IOException {
       if (markPos == -1) {
         throw new IOException("Not marked!");
@@ -765,6 +770,18 @@
     public long getPos() {
       return curPos;
     }
+    
+    @Override
+    public long skip(long amt) throws IOException {
+      long extra = (curPos + amt) - limitPos;
+      if (extra > 0) {
+        throw new IOException("Tried to skip " + extra + " bytes past " +
+            "the limit at offset " + limitPos);
+      }
+      long ret = super.skip(amt);
+      curPos += ret;
+      return ret;
+    }
   }
 
   public long getLastAppliedTxId() {
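[Editor's note] PositionTrackingInputStream now implements StreamLimiter, so the op reader (see the FSEditLogOp.Reader changes below) can cap how far a single decode may run and a corrupt length field cannot drag the parser arbitrarily far past a record boundary. The following is a hedged, stripped-down sketch of that idea covering only the single-byte read(); the real class also guards the array read() variants and skip(), and keeps mark/reset in sync. All names here are invented for the sketch.

    import java.io.ByteArrayInputStream;
    import java.io.FilterInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    // Minimal stand-in for the position-tracking, limit-enforcing wrapper.
    class LimitedStreamSketch extends FilterInputStream {
      private long curPos = 0;
      private long limitPos = Long.MAX_VALUE;

      LimitedStreamSketch(InputStream in) { super(in); }

      // Allow at most 'limit' further bytes to be read from the current position.
      void setLimit(long limit) { limitPos = curPos + limit; }

      @Override
      public int read() throws IOException {
        if (curPos + 1 > limitPos) {
          throw new IOException("Tried to read past the limit at offset " + limitPos);
        }
        int b = super.read();
        if (b != -1) curPos++;
        return b;
      }

      public static void main(String[] args) throws IOException {
        LimitedStreamSketch in = new LimitedStreamSketch(
            new ByteArrayInputStream(new byte[16]));
        in.setLimit(4);                  // at most 4 more bytes may be read
        for (int i = 0; i < 4; i++) in.read();
        try {
          in.read();                     // the fifth read exceeds the limit
        } catch (IOException e) {
          System.out.println("limit enforced: " + e.getMessage());
        }
      }
    }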
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 9f7742c..489f030 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -75,6 +75,10 @@
 public abstract class FSEditLogOp {
   public final FSEditLogOpCodes opCode;
   long txid;
+  /**
+   * Opcode size is limited to 1.5 megabytes
+   */
+  public static final int MAX_OP_SIZE = (3 * 1024 * 1024) / 2;
 
 
   @SuppressWarnings("deprecation")
@@ -2228,6 +2232,7 @@
    */
   public static class Reader {
     private final DataInputStream in;
+    private final StreamLimiter limiter;
     private final int logVersion;
     private final Checksum checksum;
     private final OpInstanceCache cache;
@@ -2238,7 +2243,7 @@
      * @param logVersion The version of the data coming from the stream.
      */
     @SuppressWarnings("deprecation")
-    public Reader(DataInputStream in, int logVersion) {
+    public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
       this.logVersion = logVersion;
       if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
         this.checksum = new PureJavaCrc32();
@@ -2252,6 +2257,7 @@
       } else {
         this.in = in;
       }
+      this.limiter = limiter;
       this.cache = new OpInstanceCache();
     }
 
@@ -2263,31 +2269,77 @@
      * 
      * @param skipBrokenEdits    If true, attempt to skip over damaged parts of
      * the input stream, rather than throwing an IOException
-     * @return the operation read from the stream, or null at the end of the file
-     * @throws IOException on error.
+     * @return the operation read from the stream, or null at the end of the 
+     *         file
+     * @throws IOException on error.  This function should only throw an
+     *         exception when skipBrokenEdits is false.
      */
     public FSEditLogOp readOp(boolean skipBrokenEdits) throws IOException {
-      FSEditLogOp op = null;
       while (true) {
         try {
-          in.mark(in.available());
-          try {
-            op = decodeOp();
-          } finally {
-            // If we encountered an exception or an end-of-file condition,
-            // do not advance the input stream.
-            if (op == null) {
-              in.reset();
-            }
-          }
-          return op;
-        } catch (IOException e) {
+          limiter.setLimit(MAX_OP_SIZE);
+          in.mark(MAX_OP_SIZE);
+          return decodeOp();
+        } catch (GarbageAfterTerminatorException e) {
+          in.reset();
           if (!skipBrokenEdits) {
             throw e;
           }
-          if (in.skip(1) < 1) {
+          // If we saw a terminator opcode followed by a long region of 0x00 or
+          // 0xff, we want to skip over that region, because there's nothing
+          // interesting there.
+          long numSkip = e.getNumAfterTerminator();
+          if (in.skip(numSkip) < numSkip) {
+            FSImage.LOG.error("Failed to skip " + numSkip + " bytes of " +
+              "garbage after an OP_INVALID.  Unexpected early EOF.");
             return null;
           }
+        } catch (IOException e) {
+          in.reset();
+          if (!skipBrokenEdits) {
+            throw e;
+          }
+        } catch (RuntimeException e) {
+          // FSEditLogOp#decodeOp is not supposed to throw RuntimeException.
+          // However, we handle it here for recovery mode, just to be more
+          // robust.
+          in.reset();
+          if (!skipBrokenEdits) {
+            throw e;
+          }
+        } catch (Throwable e) {
+          in.reset();
+          if (!skipBrokenEdits) {
+            throw new IOException("got unexpected exception " +
+                e.getMessage(), e);
+          }
+        }
+        // Move ahead one byte and re-try the decode process.
+        if (in.skip(1) < 1) {
+          return null;
+        }
+      }
+    }
+
+    private void verifyTerminator() throws IOException {
+      long off = 0;
+      /** The end of the edit log should contain only 0x00 or 0xff bytes.
+       * If it contains other bytes, the log itself may be corrupt.
+       * It is important to check this; if we don't, a stray OP_INVALID byte 
+       * could make us stop reading the edit log halfway through, and we'd never
+       * know that we had lost data.
+       */
+      byte[] buf = new byte[4096];
+      while (true) {
+        int numRead = in.read(buf);
+        if (numRead == -1) {
+          return;
+        }
+        for (int i = 0; i < numRead; i++, off++) {
+          if ((buf[i] != (byte)0) && (buf[i] != (byte)-1)) {
+            throw new GarbageAfterTerminatorException("Read garbage after " +
+            		"the terminator!", off);
+          }
         }
       }
     }
@@ -2306,8 +2358,10 @@
       }
 
       FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
-      if (opCode == OP_INVALID)
+      if (opCode == OP_INVALID) {
+        verifyTerminator();
         return null;
+      }
 
       FSEditLogOp op = cache.get(opCode);
       if (op == null) {
@@ -2477,4 +2531,35 @@
     short mode = Short.valueOf(st.getValue("MODE"));
     return new PermissionStatus(username, groupname, new FsPermission(mode));
   }
-		}
+
+  /**
+   * Exception indicating that we found an OP_INVALID followed by some 
+   * garbage.  An OP_INVALID should signify the end of the file... if there 
+   * is additional content after that, then the edit log is corrupt. 
+   */
+  static class GarbageAfterTerminatorException extends IOException {
+    private static final long serialVersionUID = 1L;
+    private final long numAfterTerminator;
+
+    public GarbageAfterTerminatorException(String str,
+        long numAfterTerminator) {
+      super(str);
+      this.numAfterTerminator = numAfterTerminator;
+    }
+
+    /**
+     * Get the number of bytes after the terminator at which the garbage
+     * appeared.
+     *
+     * So if you had an OP_INVALID followed immediately by another valid opcode,
+     * this would be 0.
+     * If you had an OP_INVALID followed by some padding bytes, followed by a
+     * stray byte at the end, this would be the number of padding bytes.
+     * 
+     * @return numAfterTerminator
+     */
+    public long getNumAfterTerminator() {
+      return numAfterTerminator;
+    }
+  }
+}
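[Editor's note] The rewritten readOp turns the reader into a resynchronizing parser: each attempt is bounded by setLimit/mark, and on failure the stream is reset and the scan moves forward, either by getNumAfterTerminator() bytes when garbage followed an OP_INVALID or by a single byte otherwise. A toy illustration of that forward-scan-and-retry loop, with a fake one-byte "decoder" standing in for decodeOp (all names invented for the sketch):

    class ResyncSketch {
      interface Decoder { boolean decodeAt(byte[] data, int pos); }

      /**
       * Scan forward from 'pos' until decoding succeeds, one byte at a time,
       * mimicking the skipBrokenEdits behaviour of Reader#readOp.
       * Returns the first decodable offset, or -1 at end of input.
       */
      static int resync(byte[] data, int pos, Decoder decoder) {
        while (pos < data.length) {
          if (decoder.decodeAt(data, pos)) {
            return pos;            // found a position where decoding succeeds
          }
          pos++;                   // move ahead one byte and re-try, as readOp does
        }
        return -1;                 // ran out of input
      }

      public static void main(String[] args) {
        byte[] log = {0x7F, 0x7F, 0x01, 0x02};     // two bytes of garbage, then a "record"
        Decoder d = (data, p) -> data[p] == 0x01;  // toy decoder: a record starts with 0x01
        System.out.println(resync(log, 0, d));     // prints 2
      }
    }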
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 2b4abd7..7a8a137 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -54,12 +54,14 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -88,9 +90,6 @@
 
   private final NNStorageRetentionManager archivalManager;
 
-  private SaveNamespaceContext curSaveNamespaceContext = null; 
-
-
   /**
    * Construct an FSImage
    * @param conf Configuration
@@ -536,6 +535,11 @@
     return editLog;
   }
 
+  @VisibleForTesting
+  void setEditLogForTesting(FSEditLog newLog) {
+    editLog = newLog;
+  }
+
   void openEditLogForWrite() throws IOException {
     assert editLog != null : "editLog must be initialized";
     editLog.openForWrite();
@@ -555,7 +559,7 @@
 
   /**
    * Choose latest image from one of the directories,
-   * load it and merge with the edits from that directory.
+   * load it and merge with the edits.
    * 
    * Saving and loading fsimage should never trigger symlink resolution. 
    * The paths that are persisted do not have *intermediate* symlinks 
@@ -591,7 +595,7 @@
       // OK to not be able to read all of edits right now.
       long toAtLeastTxId = editLog.isOpenForWrite() ? inspector.getMaxSeenTxId() : 0;
       editStreams = editLog.selectInputStreams(imageFile.getCheckpointTxId() + 1,
-          toAtLeastTxId, false);
+          toAtLeastTxId, recovery, false);
     } else {
       editStreams = FSImagePreTransactionalStorageInspector
         .getEditLogStreams(storage);
@@ -599,7 +603,10 @@
  
     LOG.debug("Planning to load image :\n" + imageFile);
     for (EditLogInputStream l : editStreams) {
-      LOG.debug("\t Planning to load edit stream: " + l);
+      LOG.debug("Planning to load edit log stream: " + l);
+    }
+    if (!editStreams.iterator().hasNext()) {
+      LOG.info("No edit log streams selected.");
     }
     
     try {
@@ -798,17 +805,28 @@
         try {
           thread.join();
         } catch (InterruptedException iex) {
-          LOG.error("Caught exception while waiting for thread " +
+          LOG.error("Caught interrupted exception while waiting for thread " +
                     thread.getName() + " to finish. Retrying join");
         }        
       }
     }
   }
+  
+  /**
+   * @see #saveNamespace(FSNamesystem, Canceler)
+   */
+  public synchronized void saveNamespace(FSNamesystem source)
+      throws IOException {
+    saveNamespace(source, null);
+  }
+  
   /**
    * Save the contents of the FS image to a new image file in each of the
    * current storage directories.
+   * @param canceler can be used to cancel the save in progress; may be null
    */
-  public synchronized void saveNamespace(FSNamesystem source) throws IOException {
+  public synchronized void saveNamespace(FSNamesystem source,
+      Canceler canceler) throws IOException {
     assert editLog != null : "editLog must be initialized";
     storage.attemptRestoreRemovedStorage();
 
@@ -819,7 +837,7 @@
     }
     long imageTxId = getLastAppliedOrWrittenTxId();
     try {
-      saveFSImageInAllDirs(source, imageTxId);
+      saveFSImageInAllDirs(source, imageTxId, canceler);
       storage.writeAll();
     } finally {
       if (editLogWasOpen) {
@@ -831,27 +849,27 @@
         storage.writeTransactionIdFileToStorage(imageTxId + 1);
       }
     }
-    
-  }
-  
-  public void cancelSaveNamespace(String reason)
-      throws InterruptedException {
-    SaveNamespaceContext ctx = curSaveNamespaceContext;
-    if (ctx != null) {
-      ctx.cancel(reason); // waits until complete
-    }
   }
 
-  
+  /**
+   * @see #saveFSImageInAllDirs(FSNamesystem, long, Canceler)
+   */
   protected synchronized void saveFSImageInAllDirs(FSNamesystem source, long txid)
+      throws IOException {
+    saveFSImageInAllDirs(source, txid, null);
+  }
+
+  protected synchronized void saveFSImageInAllDirs(FSNamesystem source, long txid,
+      Canceler canceler)
       throws IOException {    
     if (storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0) {
       throw new IOException("No image directories available!");
     }
-    
+    if (canceler == null) {
+      canceler = new Canceler();
+    }
     SaveNamespaceContext ctx = new SaveNamespaceContext(
-        source, txid);
-    curSaveNamespaceContext = ctx;
+        source, txid, canceler);
     
     try {
       List<Thread> saveThreads = new ArrayList<Thread>();
@@ -872,7 +890,7 @@
         throw new IOException(
           "Failed to save in any storage directories while saving namespace.");
       }
-      if (ctx.isCancelled()) {
+      if (canceler.isCancelled()) {
         deleteCancelledCheckpoint(txid);
         ctx.checkCancelled(); // throws
         assert false : "should have thrown above!";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index f666f35..a65f20e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -540,7 +540,6 @@
     private void saveImage(ByteBuffer currentDirName,
                                   INodeDirectory current,
                                   DataOutputStream out) throws IOException {
-      context.checkCancelled();
       List<INode> children = current.getChildrenRaw();
       if (children == null || children.isEmpty())
         return;
@@ -554,9 +553,13 @@
         out.write(currentDirName.array(), 0, prefixLen);
       }
       out.writeInt(children.size());
+      int i = 0;
       for(INode child : children) {
         // print all children first
         FSImageSerialization.saveINode2Image(child, out);
+        if (i++ % 50 == 0) {
+          context.checkCancelled();
+        }
       }
       for(INode child : children) {
         if(!child.isDirectory())
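[Editor's note] The FSImageFormat change above moves the cancellation check into the per-child loop, polling the SaveNamespaceContext only once every 50 saved inodes so that a huge directory can still be aborted promptly without paying the check on every entry. A generic sketch of that pattern, using an AtomicBoolean as a stand-in for the Canceler (names are illustrative, not HDFS APIs):

    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicBoolean;

    class PeriodicCancelSketch {
      static final int CHECK_INTERVAL = 50;   // mirrors the "every 50 inodes" choice

      /** Saves items, polling the cancel flag once per CHECK_INTERVAL items. */
      static int saveAll(List<String> items, AtomicBoolean cancelled)
          throws InterruptedException {
        int saved = 0;
        for (String item : items) {
          // ... write 'item' to the image file here ...
          saved++;
          if (saved % CHECK_INTERVAL == 0 && cancelled.get()) {
            throw new InterruptedException("save cancelled after " + saved + " items");
          }
        }
        return saved;
      }

      public static void main(String[] args) throws InterruptedException {
        System.out.println(saveAll(Arrays.asList("a", "b", "c"), new AtomicBoolean(false)));
      }
    }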
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f1072f9..fbca355 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -197,18 +197,33 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
-/***************************************************
- * FSNamesystem does the actual bookkeeping work for the
- * DataNode.
+/**
+ * FSNamesystem is a container of both transient
+ * and persisted name-space state, and does all the book-keeping
+ * work on a NameNode.
  *
- * It tracks several important tables.
+ * Its roles are briefly described below:
  *
- * 1)  valid fsname --> blocklist  (kept on disk, logged)
+ * 1) Is the container for BlockManager, DatanodeManager,
+ *    DelegationTokens, LeaseManager, etc. services.
+ * 2) RPC calls that modify or inspect the name-space
+ *    should get delegated here.
+ * 3) Anything that touches only blocks (e.g. block reports)
+ *    is delegated to BlockManager.
+ * 4) Anything that touches only file information (e.g. permissions, mkdirs)
+ *    is delegated to FSDirectory.
+ * 5) Anything that crosses two of the above components should be
+ *    coordinated here.
+ * 6) Logs mutations to FSEditLog.
+ *
+ * This class and its contents keep:
+ *
+ * 1)  Valid fsname --> blocklist  (kept on disk, logged)
  * 2)  Set of all valid blocks (inverted #1)
  * 3)  block --> machinelist (kept in memory, rebuilt dynamically from reports)
  * 4)  machine --> blocklist (inverted #2)
  * 5)  LRU cache of updated-heartbeat machines
- ***************************************************/
+ */
 @InterfaceAudience.Private
 @Metrics(context="dfs")
 public class FSNamesystem implements Namesystem, FSClusterStats,
@@ -664,8 +679,12 @@
     }
   }
   
-  /** Start services required in standby state */
-  void startStandbyServices(final Configuration conf) {
+  /**
+   * Start services required in standby state 
+   * 
+   * @throws IOException
+   */
+  void startStandbyServices(final Configuration conf) throws IOException {
     LOG.info("Starting services required for standby state");
     if (!dir.fsImage.editLog.isOpenForRead()) {
       // During startup, we're already open for read.
@@ -687,7 +706,8 @@
    */
   void prepareToStopStandbyServices() throws ServiceFailedException {
     if (standbyCheckpointer != null) {
-      standbyCheckpointer.cancelAndPreventCheckpoints();
+      standbyCheckpointer.cancelAndPreventCheckpoints(
+          "About to leave standby state");
     }
   }
 
@@ -1868,6 +1888,7 @@
       QuotaExceededException, SafeModeException, UnresolvedLinkException,
       IOException {
     checkBlock(previous);
+    Block previousBlock = ExtendedBlock.getLocalBlock(previous);
     long fileLength, blockSize;
     int replication;
     DatanodeDescriptor clientNode = null;
@@ -1890,10 +1911,65 @@
       // have we exceeded the configured limit of fs objects.
       checkFsObjectLimit();
 
-      INodeFileUnderConstruction pendingFile  = checkLease(src, clientName);
+      INodeFileUnderConstruction pendingFile = checkLease(src, clientName);
+      BlockInfo lastBlockInFile = pendingFile.getLastBlock();
+      if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
+        // The block that the client claims is the current last block
+        // doesn't match up with what we think is the last block. There are
+        // three possibilities:
+        // 1) This is the first block allocation of an append() pipeline
+        //    which started appending exactly at a block boundary.
+        //    In this case, the client isn't passed the previous block,
+        //    so it makes the allocateBlock() call with previous=null.
+        //    We can distinguish this since the last block of the file
+        //    will be exactly a full block.
+        // 2) This is a retry from a client that missed the response of a
+        //    prior getAdditionalBlock() call, perhaps because of a network
+        //    timeout, or because of an HA failover. In that case, we know
+        //    by the fact that the client is re-issuing the RPC that it
+        //    never began to write to the old block. Hence it is safe to
+        //    abandon it and allocate a new one.
+        // 3) This is an entirely bogus request/bug -- we should error out
+        //    rather than potentially appending a new block with an empty
+        //    one in the middle, etc.
+
+        BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
+        if (previous == null &&
+            lastBlockInFile != null &&
+            lastBlockInFile.getNumBytes() == pendingFile.getPreferredBlockSize() &&
+            lastBlockInFile.isComplete()) {
+          // Case 1
+          if (NameNode.stateChangeLog.isDebugEnabled()) {
+             NameNode.stateChangeLog.debug(
+                 "BLOCK* NameSystem.allocateBlock: handling block allocation" +
+                 " writing to a file with a complete previous block: src=" +
+                 src + " lastBlock=" + lastBlockInFile);
+          }
+        } else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
+          // Case 2
+          if (lastBlockInFile.getNumBytes() != 0) {
+            throw new IOException(
+                "Request looked like a retry to allocate block " +
+                lastBlockInFile + " but it already contains " +
+                lastBlockInFile.getNumBytes() + " bytes");
+          }
+
+          // The retry case (case 2 above) -- abandon the old block.
+          NameNode.stateChangeLog.info("BLOCK* NameSystem.allocateBlock: " +
+              "caught retry for allocation of a new block in " +
+              src + ". Abandoning old block " + lastBlockInFile);
+          dir.removeBlock(src, pendingFile, lastBlockInFile);
+          dir.persistBlocks(src, pendingFile);
+        } else {
+          
+          throw new IOException("Cannot allocate block in " + src + ": " +
+              "passed 'previous' block " + previous + " does not match actual " +
+              "last block in file " + lastBlockInFile);
+        }
+      }
 
       // commit the last block and complete it if it has minimum replicas
-      commitOrCompleteLastBlock(pendingFile, ExtendedBlock.getLocalBlock(previous));
+      commitOrCompleteLastBlock(pendingFile, previousBlock);
 
       //
       // If we fail this, bad things happen!
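[Editor's note] The long comment above distinguishes three ways the client's notion of the previous block can disagree with the NameNode's: an append that started exactly at a block boundary, a retried getAdditionalBlock RPC whose first response was lost, and a genuinely bogus request. A compact sketch of that three-way classification, with plain Long ids standing in for the id-plus-generation-stamp match that Block.matchingIdAndGenStamp performs; the real code additionally verifies the abandoned block is still empty before removing it. All names here are illustrative only.

    class AllocRetrySketch {
      enum Outcome { NORMAL, APPEND_AT_BOUNDARY, RETRIED_RPC, BOGUS_REQUEST }

      static Outcome classify(Long previous, Long lastBlock, Long penultimateBlock,
          long lastBlockBytes, long preferredBlockSize, boolean lastBlockComplete) {
        if (previous != null && previous.equals(lastBlock)) {
          return Outcome.NORMAL;             // client and NameNode agree
        }
        if (previous == null && lastBlock != null
            && lastBlockBytes == preferredBlockSize && lastBlockComplete) {
          return Outcome.APPEND_AT_BOUNDARY; // case 1: append starting at a block edge
        }
        if (previous != null && previous.equals(penultimateBlock)) {
          return Outcome.RETRIED_RPC;        // case 2: response lost, old block unused
        }
        return Outcome.BOGUS_REQUEST;        // case 3: reject rather than corrupt the file
      }
    }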
@@ -2104,7 +2180,29 @@
       throw new SafeModeException("Cannot complete file " + src, safeMode);
     }
 
-    INodeFileUnderConstruction pendingFile = checkLease(src, holder);
+    INodeFileUnderConstruction pendingFile;
+    try {
+      pendingFile = checkLease(src, holder);
+    } catch (LeaseExpiredException lee) {
+      INodeFile file = dir.getFileINode(src);
+      if (file != null && !file.isUnderConstruction()) {
+        // This could be a retry RPC - i.e. the client tried to close
+        // the file, but missed the RPC response. Thus, it is trying
+        // again to close the file. If the file still exists and
+        // the client's view of the last block matches the actual
+        // last block, then we'll treat it as a successful close.
+        // See HDFS-3031.
+        Block realLastBlock = file.getLastBlock();
+        if (Block.matchingIdAndGenStamp(last, realLastBlock)) {
+          NameNode.stateChangeLog.info("DIR* NameSystem.completeFile: " +
+              "received request from " + holder + " to complete file " + src +
+              " which is already closed. But, it appears to be an RPC " +
+              "retry. Returning success.");
+          return true;
+        }
+      }
+      throw lee;
+    }
     // commit the last block and complete it if it has minimum replicas
     commitOrCompleteLastBlock(pendingFile, last);
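[Editor's note] Similarly, completeFile now tolerates a duplicate close: when checkLease fails with LeaseExpiredException but the file still exists, is no longer under construction, and the client's idea of the last block matches the real one, the call is treated as a retried RPC and answered with success (HDFS-3031). A hedged sketch of just that decision, again with plain Long ids replacing the id/generation-stamp comparison and invented names:

    class CompleteFileRetrySketch {
      /** True if a duplicate completeFile() call may simply be answered with success. */
      static boolean isBenignRetry(boolean fileExists, boolean underConstruction,
          Long clientLastBlock, Long actualLastBlock) {
        if (!fileExists || underConstruction) {
          return false;                      // a real lease problem, not a retry
        }
        // File is already closed; accept only if both sides agree on the last block.
        return clientLastBlock == null ? actualLastBlock == null
                                       : clientLastBlock.equals(actualLastBlock);
      }
    }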
 
@@ -2689,9 +2787,9 @@
       throw new IOException(message);
     }
 
-    // no we know that the last block is not COMPLETE, and
+    // The last block is not COMPLETE, and
     // that the penultimate block if exists is either COMPLETE or COMMITTED
-    BlockInfoUnderConstruction lastBlock = pendingFile.getLastBlock();
+    final BlockInfo lastBlock = pendingFile.getLastBlock();
     BlockUCState lastBlockState = lastBlock.getBlockUCState();
     BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
     boolean penultimateBlockMinReplication;
@@ -2735,13 +2833,15 @@
       throw new AlreadyBeingCreatedException(message);
     case UNDER_CONSTRUCTION:
     case UNDER_RECOVERY:
+      final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)lastBlock;
       // setup the last block locations from the blockManager if not known
-      if(lastBlock.getNumExpectedLocations() == 0)
-        lastBlock.setExpectedLocations(blockManager.getNodes(lastBlock));
+      if (uc.getNumExpectedLocations() == 0) {
+        uc.setExpectedLocations(blockManager.getNodes(lastBlock));
+      }
       // start recovery of the last block for this file
       long blockRecoveryId = nextGenerationStamp();
       lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
-      lastBlock.initializeBlockRecovery(blockRecoveryId);
+      uc.initializeBlockRecovery(blockRecoveryId);
       leaseManager.renewLease(lease);
       // Cannot close file right now, since the last block requires recovery.
       // This may potentially cause infinite loop in lease recovery
@@ -3280,27 +3380,6 @@
   }
   
   /**
-   * Cancel an ongoing saveNamespace operation and wait for its
-   * threads to exit, if one is currently in progress.
-   *
-   * If no such operation is in progress, this call does nothing.
-   *
-   * @param reason a reason to be communicated to the caller saveNamespace 
-   * @throws IOException
-   */
-  void cancelSaveNamespace(String reason) throws IOException {
-    readLock();
-    try {
-      checkSuperuserPrivilege();
-      getFSImage().cancelSaveNamespace(reason);
-    } catch (InterruptedException e) {
-      throw new IOException(e);
-    } finally {
-      readUnlock();
-    }
-  }
-  
-  /**
    * Enables/Disables/Checks restoring failed storage replicas if the storage becomes available again.
    * Requires superuser privilege.
    * 
@@ -4499,15 +4578,16 @@
     LOG.info("updatePipeline(" + oldBlock + ") successfully to " + newBlock);
   }
 
-  /** @see updatePipeline(String, ExtendedBlock, ExtendedBlock, DatanodeID[]) */
+  /** @see #updatePipeline(String, ExtendedBlock, ExtendedBlock, DatanodeID[]) */
   private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock, 
       ExtendedBlock newBlock, DatanodeID[] newNodes)
       throws IOException {
     assert hasWriteLock();
     // check the validity of the block and lease holder name
-    final INodeFileUnderConstruction pendingFile = 
-      checkUCBlock(oldBlock, clientName);
-    final BlockInfoUnderConstruction blockinfo = pendingFile.getLastBlock();
+    final INodeFileUnderConstruction pendingFile
+        = checkUCBlock(oldBlock, clientName);
+    final BlockInfoUnderConstruction blockinfo
+        = (BlockInfoUnderConstruction)pendingFile.getLastBlock();
 
     // check new GS & length: this is not expected
     if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() ||
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 3767111..75caac6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -22,6 +22,7 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Collection;
 import java.util.List;
 import java.util.Comparator;
 import java.util.Collections;
@@ -73,7 +74,7 @@
 
   @Override 
   public void close() throws IOException {}
-
+  
   @Override
   synchronized public EditLogOutputStream startLogSegment(long txid) 
       throws IOException {
@@ -212,90 +213,46 @@
   }
 
   @Override
-  synchronized public EditLogInputStream getInputStream(long fromTxId,
-      boolean inProgressOk) throws IOException {
-    for (EditLogFile elf : getLogFiles(fromTxId)) {
-      if (elf.containsTxId(fromTxId)) {
-        if (!inProgressOk && elf.isInProgress()) {
+  synchronized public void selectInputStreams(
+      Collection<EditLogInputStream> streams, long fromTxId,
+      boolean inProgressOk) {
+    List<EditLogFile> elfs;
+    try {
+      elfs = matchEditLogs(sd.getCurrentDir());
+    } catch (IOException e) {
+      LOG.error("error listing files in " + this + ". " +
+          "Skipping all edit logs in this directory.", e);
+      return;
+    }
+    LOG.debug(this + ": selecting input streams starting at " + fromTxId + 
+        (inProgressOk ? " (inProgress ok) " : " (excluding inProgress) ") +
+        "from among " + elfs.size() + " candidate file(s)");
+    for (EditLogFile elf : elfs) {
+      if (elf.lastTxId < fromTxId) {
+        LOG.debug("passing over " + elf + " because it ends at " +
+            elf.lastTxId + ", but we only care about transactions " +
+            "as new as " + fromTxId);
+        continue;
+      }
+      if (elf.isInProgress()) {
+        if (!inProgressOk) {
+          LOG.debug("passing over " + elf + " because it is in progress " +
+              "and we are ignoring in-progress logs.");
           continue;
         }
-        if (elf.isInProgress()) {
+        try {
           elf.validateLog();
+        } catch (IOException e) {
+          LOG.error("got IOException while trying to validate header of " +
+              elf + ".  Skipping.", e);
+          continue;
         }
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Returning edit stream reading from " + elf);
-        }
-        EditLogFileInputStream elfis = new EditLogFileInputStream(elf.getFile(),
+      }
+      EditLogFileInputStream elfis = new EditLogFileInputStream(elf.getFile(),
             elf.getFirstTxId(), elf.getLastTxId(), elf.isInProgress());
-        long transactionsToSkip = fromTxId - elf.getFirstTxId();
-        if (transactionsToSkip > 0) {
-          LOG.info(String.format("Log begins at txid %d, but requested start "
-              + "txid is %d. Skipping %d edits.", elf.getFirstTxId(), fromTxId,
-              transactionsToSkip));
-        }
-        if (elfis.skipUntil(fromTxId) == false) {
-          throw new IOException("failed to advance input stream to txid " +
-              fromTxId);
-        }
-        return elfis;
-      }
+      LOG.debug("selecting edit log stream " + elf);
+      streams.add(elfis);
     }
-
-    throw new IOException("Cannot find editlog file containing " + fromTxId);
-  }
-
-  @Override
-  public long getNumberOfTransactions(long fromTxId, boolean inProgressOk)
-      throws IOException, CorruptionException {
-    long numTxns = 0L;
-    
-    for (EditLogFile elf : getLogFiles(fromTxId)) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Counting " + elf);
-      }
-      if (elf.getFirstTxId() > fromTxId) { // there must be a gap
-        LOG.warn("Gap in transactions in " + sd.getRoot() + ". Gap is "
-            + fromTxId + " - " + (elf.getFirstTxId() - 1));
-        break;
-      } else if (elf.containsTxId(fromTxId)) {
-        if (!inProgressOk && elf.isInProgress()) {
-          break;
-        }
-        
-        if (elf.isInProgress()) {
-          elf.validateLog();
-        } 
-
-        if (elf.hasCorruptHeader()) {
-          break;
-        }
-        numTxns += elf.getLastTxId() + 1 - fromTxId;
-        fromTxId = elf.getLastTxId() + 1;
-        
-        if (elf.isInProgress()) {
-          break;
-        }
-      }
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Journal " + this + " has " + numTxns 
-                + " txns from " + fromTxId);
-    }
-
-    long max = findMaxTransaction(inProgressOk);
-    
-    // fromTxId should be greater than max, as it points to the next 
-    // transaction we should expect to find. If it is less than or equal
-    // to max, it means that a transaction with txid == max has not been found
-    if (numTxns == 0 && fromTxId <= max) { 
-      String error = String.format("Gap in transactions, max txnid is %d"
-                                   + ", 0 txns from %d", max, fromTxId);
-      LOG.error(error);
-      throw new CorruptionException(error);
-    }
-
-    return numTxns;
   }
 
   @Override
@@ -318,7 +275,7 @@
           }
           continue;
         }
-        
+
         elf.validateLog();
 
         if (elf.hasCorruptHeader()) {
@@ -326,19 +283,16 @@
           throw new CorruptionException("In-progress edit log file is corrupt: "
               + elf);
         }
-        
-        // If the file has a valid header (isn't corrupt) but contains no
-        // transactions, we likely just crashed after opening the file and
-        // writing the header, but before syncing any transactions. Safe to
-        // delete the file.
-        if (elf.getNumTransactions() == 0) {
-          LOG.info("Deleting edit log file with zero transactions " + elf);
-          if (!elf.getFile().delete()) {
-            throw new IOException("Unable to delete " + elf.getFile());
-          }
+        if (elf.getLastTxId() == HdfsConstants.INVALID_TXID) {
+          // If the file has a valid header (isn't corrupt) but contains no
+          // transactions, we likely just crashed after opening the file and
+          // writing the header, but before syncing any transactions. Safe to
+          // delete the file.
+          LOG.info("Moving aside edit log file that seems to have zero " +
+              "transactions " + elf);
+          elf.moveAsideEmptyFile();
           continue;
         }
-        
         finalizeLogSegment(elf.getFirstTxId(), elf.getLastTxId());
       }
     }
@@ -361,39 +315,6 @@
     return logFiles;
   }
 
-  /** 
-   * Find the maximum transaction in the journal.
-   */
-  private long findMaxTransaction(boolean inProgressOk)
-      throws IOException {
-    boolean considerSeenTxId = true;
-    long seenTxId = NNStorage.readTransactionIdFile(sd);
-    long maxSeenTransaction = 0;
-    for (EditLogFile elf : getLogFiles(0)) {
-      if (elf.isInProgress() && !inProgressOk) {
-        if (elf.getFirstTxId() != HdfsConstants.INVALID_TXID &&
-            elf.getFirstTxId() <= seenTxId) {
-          // don't look at the seen_txid file if in-progress logs are not to be
-          // examined, and the value in seen_txid falls within the in-progress
-          // segment.
-          considerSeenTxId = false;
-        }
-        continue;
-      }
-      
-      if (elf.isInProgress()) {
-        maxSeenTransaction = Math.max(elf.getFirstTxId(), maxSeenTransaction);
-        elf.validateLog();
-      }
-      maxSeenTransaction = Math.max(elf.getLastTxId(), maxSeenTransaction);
-    }
-    if (considerSeenTxId) {
-      return Math.max(maxSeenTransaction, seenTxId);
-    } else {
-      return maxSeenTransaction;
-    }
-  }
-
   @Override
   public String toString() {
     return String.format("FileJournalManager(root=%s)", sd.getRoot());
@@ -406,7 +327,6 @@
     private File file;
     private final long firstTxId;
     private long lastTxId;
-    private long numTx = -1;
 
     private boolean hasCorruptHeader = false;
     private final boolean isInProgress;
@@ -454,20 +374,15 @@
     }
 
     /** 
-     * Count the number of valid transactions in a log.
+     * Find out where the edit log ends.
      * This will update the lastTxId of the EditLogFile or
      * mark it as corrupt if it is.
      */
     void validateLog() throws IOException {
       EditLogValidation val = EditLogFileInputStream.validateEditLog(file);
-      this.numTx = val.getNumTransactions();
       this.lastTxId = val.getEndTxId();
       this.hasCorruptHeader = val.hasCorruptHeader();
     }
-    
-    long getNumTransactions() {
-      return numTx;
-    }
 
     boolean isInProgress() {
       return isInProgress;
@@ -483,23 +398,31 @@
 
     void moveAsideCorruptFile() throws IOException {
       assert hasCorruptHeader;
-    
+      renameSelf(".corrupt");
+    }
+
+    void moveAsideEmptyFile() throws IOException {
+      assert lastTxId == HdfsConstants.INVALID_TXID;
+      renameSelf(".empty");
+    }
+      
+    private void renameSelf(String newSuffix) throws IOException {
       File src = file;
-      File dst = new File(src.getParent(), src.getName() + ".corrupt");
+      File dst = new File(src.getParent(), src.getName() + newSuffix);
       boolean success = src.renameTo(dst);
       if (!success) {
         throw new IOException(
-          "Couldn't rename corrupt log " + src + " to " + dst);
+          "Couldn't rename log " + src + " to " + dst);
       }
       file = dst;
     }
-    
+
     @Override
     public String toString() {
       return String.format("EditLogFile(file=%s,first=%019d,last=%019d,"
-                           +"inProgress=%b,hasCorruptHeader=%b,numTx=%d)",
+                           +"inProgress=%b,hasCorruptHeader=%b)",
                            file.toString(), firstTxId, lastTxId,
-                           isInProgress(), hasCorruptHeader, numTx);
+                           isInProgress(), hasCorruptHeader);
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
index fa3d018..d04c06b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
@@ -29,6 +29,7 @@
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 
 import org.apache.commons.logging.Log;
@@ -43,12 +44,14 @@
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
+import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.net.InetAddresses;
 
 /**
  * This class is used in Namesystem's jetty to retrieve a file.
@@ -83,11 +86,12 @@
       final Configuration conf = 
         (Configuration)getServletContext().getAttribute(JspHelper.CURRENT_CONF);
       
-      if(UserGroupInformation.isSecurityEnabled() && 
-          !isValidRequestor(request.getUserPrincipal().getName(), conf)) {
+      if (UserGroupInformation.isSecurityEnabled() && 
+          !isValidRequestor(context, request.getUserPrincipal().getName(), conf)) {
         response.sendError(HttpServletResponse.SC_FORBIDDEN, 
-            "Only Namenode and Secondary Namenode may access this servlet");
-        LOG.warn("Received non-NN/SNN request for image or edits from " 
+            "Only Namenode, Secondary Namenode, and administrators may access " +
+            "this servlet");
+        LOG.warn("Received non-NN/SNN/administrator request for image or edits from " 
             + request.getUserPrincipal().getName() + " at " + request.getRemoteHost());
         return;
       }
@@ -156,6 +160,11 @@
                 return null;
               }
               
+              // We may have lost our ticket since last checkpoint, log in again, just in case
+              if (UserGroupInformation.isSecurityEnabled()) {
+                UserGroupInformation.getCurrentUser().reloginFromKeytab();
+              }
+              
               // issue a HTTP get request to download the new fsimage 
               MD5Hash downloadImageDigest =
                 TransferFsImage.downloadImageToStorage(
@@ -207,8 +216,8 @@
   }
   
   @VisibleForTesting
-  static boolean isValidRequestor(String remoteUser, Configuration conf)
-      throws IOException {
+  static boolean isValidRequestor(ServletContext context, String remoteUser,
+      Configuration conf) throws IOException {
     if(remoteUser == null) { // This really shouldn't happen...
       LOG.warn("Received null remoteUser while authorizing access to getImage servlet");
       return false;
@@ -235,11 +244,17 @@
 
     for(String v : validRequestors) {
       if(v != null && v.equals(remoteUser)) {
-        if(LOG.isInfoEnabled()) LOG.info("GetImageServlet allowing: " + remoteUser);
+        LOG.info("GetImageServlet allowing checkpointer: " + remoteUser);
         return true;
       }
     }
-    if(LOG.isInfoEnabled()) LOG.info("GetImageServlet rejecting: " + remoteUser);
+    
+    if (HttpServer.userHasAdministratorAccess(context, remoteUser)) {
+      LOG.info("GetImageServlet allowing administrator: " + remoteUser);
+      return true;
+    }
+    
+    LOG.info("GetImageServlet rejecting: " + remoteUser);
     return false;
   }
   
@@ -282,8 +297,7 @@
     return "putimage=1" +
       "&" + TXID_PARAM + "=" + txid +
       "&port=" + imageListenAddress.getPort() +
-      "&machine=" + imageListenAddress.getHostName()
-      + "&" + STORAGEINFO_PARAM + "=" +
+      "&" + STORAGEINFO_PARAM + "=" +
       storage.toColonSeparatedString();
   }
 
@@ -310,7 +324,10 @@
       Map<String, String[]> pmap = request.getParameterMap();
       isGetImage = isGetEdit = isPutImage = fetchLatest = false;
       remoteport = 0;
-      machineName = null;
+      machineName = request.getRemoteHost();
+      if (InetAddresses.isInetAddress(machineName)) {
+        machineName = NetUtils.getHostNameOfIP(machineName);
+      }
 
       for (Map.Entry<String, String[]> entry : pmap.entrySet()) {
         String key = entry.getKey();
@@ -335,8 +352,6 @@
           txId = parseLongParam(request, TXID_PARAM);
         } else if (key.equals("port")) { 
           remoteport = new Integer(val[0]).intValue();
-        } else if (key.equals("machine")) { 
-          machineName = val[0];
         } else if (key.equals(STORAGEINFO_PARAM)) {
           storageInfoString = val[0];
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index e940b61..c9d26e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -177,14 +177,14 @@
     return (short)PermissionStatusFormat.MODE.retrieve(permission);
   }
   /** Set the {@link FsPermission} of this {@link INode} */
-  protected void setPermission(FsPermission permission) {
+  void setPermission(FsPermission permission) {
     updatePermissionStatus(PermissionStatusFormat.MODE, permission.toShort());
   }
 
   /**
    * Check whether it's a directory
    */
-  public abstract boolean isDirectory();
+  abstract boolean isDirectory();
 
   /**
    * Collect all the blocks in all children of this INode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index b3485ec..3bfb335 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -41,9 +41,9 @@
   //Format: [16 bits for replication][48 bits for PreferredBlockSize]
   static final long HEADERMASK = 0xffffL << BLOCKBITS;
 
-  protected long header;
+  private long header;
 
-  protected BlockInfo blocks[] = null;
+  BlockInfo blocks[] = null;
 
   INodeFile(PermissionStatus permissions,
             int nrBlocks, short replication, long modificationTime,
@@ -52,12 +52,7 @@
         modificationTime, atime, preferredBlockSize);
   }
 
-  protected INodeFile() {
-    blocks = null;
-    header = 0;
-  }
-
-  protected INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
+  INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
                       short replication, long modificationTime,
                       long atime, long preferredBlockSize) {
     super(permissions, modificationTime, atime);
@@ -71,47 +66,40 @@
    * Since this is a file,
    * the {@link FsAction#EXECUTE} action, if any, is ignored.
    */
-  protected void setPermission(FsPermission permission) {
+  void setPermission(FsPermission permission) {
     super.setPermission(permission.applyUMask(UMASK));
   }
 
-  public boolean isDirectory() {
+  boolean isDirectory() {
     return false;
   }
 
-  /**
-   * Get block replication for the file 
-   * @return block replication value
-   */
+  /** @return the replication factor of the file. */
+  @Override
   public short getReplication() {
     return (short) ((header & HEADERMASK) >> BLOCKBITS);
   }
 
-  public void setReplication(short replication) {
+  void setReplication(short replication) {
     if(replication <= 0)
        throw new IllegalArgumentException("Unexpected value for the replication");
     header = ((long)replication << BLOCKBITS) | (header & ~HEADERMASK);
   }
 
-  /**
-   * Get preferred block size for the file
-   * @return preferred block size in bytes
-   */
+  /** @return preferred block size (in bytes) of the file. */
+  @Override
   public long getPreferredBlockSize() {
-        return header & ~HEADERMASK;
+    return header & ~HEADERMASK;
   }
 
-  public void setPreferredBlockSize(long preferredBlkSize)
-  {
+  private void setPreferredBlockSize(long preferredBlkSize) {
     if((preferredBlkSize < 0) || (preferredBlkSize > ~HEADERMASK ))
        throw new IllegalArgumentException("Unexpected value for the block size");
     header = (header & HEADERMASK) | (preferredBlkSize & ~HEADERMASK);
   }
 
-  /**
-   * Get file blocks 
-   * @return file blocks
-   */
+  /** @return the blocks of the file. */
+  @Override
   public BlockInfo[] getBlocks() {
     return this.blocks;
   }
@@ -152,9 +140,7 @@
     }
   }
 
-  /**
-   * Set file block
-   */
+  /** Set the block of the file at the given index. */
   public void setBlock(int idx, BlockInfo blk) {
     this.blocks[idx] = blk;
   }
@@ -171,6 +157,7 @@
     return 1;
   }
   
+  @Override
   public String getName() {
     // Get the full path name of this inode.
     return getFullPathName();
@@ -215,7 +202,7 @@
     return diskspaceConsumed(blocks);
   }
   
-  long diskspaceConsumed(Block[] blkArr) {
+  private long diskspaceConsumed(Block[] blkArr) {
     long size = 0;
     if(blkArr == null) 
       return 0;
@@ -245,26 +232,12 @@
     return blocks[blocks.length - 2];
   }
 
-  /**
-   * Get the last block of the file.
-   * Make sure it has the right type.
-   */
-  public <T extends BlockInfo> T getLastBlock() throws IOException {
-    if (blocks == null || blocks.length == 0)
-      return null;
-    T returnBlock = null;
-    try {
-      @SuppressWarnings("unchecked")  // ClassCastException is caught below
-      T tBlock = (T)blocks[blocks.length - 1];
-      returnBlock = tBlock;
-    } catch(ClassCastException cce) {
-      throw new IOException("Unexpected last block type: " 
-          + blocks[blocks.length - 1].getClass().getSimpleName());
-    }
-    return returnBlock;
+  @Override
+  public BlockInfo getLastBlock() throws IOException {
+    return blocks == null || blocks.length == 0? null: blocks[blocks.length-1];
   }
 
-  /** @return the number of blocks */ 
+  @Override
   public int numBlocks() {
     return blocks == null ? 0 : blocks.length;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
index f9c622d..9efeca7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
@@ -19,6 +19,7 @@
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -46,26 +47,17 @@
   void finalizeLogSegment(long firstTxId, long lastTxId) throws IOException;
 
    /**
-   * Get the input stream starting with fromTxnId from this journal manager
+   * Get a list of edit log input streams.  The list will start with the
+   * stream that contains fromTxnId, and continue until the end of the journal
+   * being managed.
+   * 
    * @param fromTxnId the first transaction id we want to read
    * @param inProgressOk whether or not in-progress streams should be returned
-   * @return the stream starting with transaction fromTxnId
-   * @throws IOException if a stream cannot be found.
-   */
-  EditLogInputStream getInputStream(long fromTxnId, boolean inProgressOk)
-    throws IOException;
-
-  /**
-   * Get the number of transaction contiguously available from fromTxnId.
    *
-   * @param fromTxnId Transaction id to count from
-   * @param inProgressOk whether or not in-progress streams should be counted
-   * @return The number of transactions available from fromTxnId
-   * @throws IOException if the journal cannot be read.
-   * @throws CorruptionException if there is a gap in the journal at fromTxnId.
+   * @return a list of streams
    */
-  long getNumberOfTransactions(long fromTxnId, boolean inProgressOk)
-      throws IOException, CorruptionException;
+  void selectInputStreams(Collection<EditLogInputStream> streams,
+      long fromTxnId, boolean inProgressOk);
 
   /**
    * Set the amount of memory that this stream should use to buffer edits
@@ -92,7 +84,7 @@
    * Close the journal manager, freeing any resources it may hold.
    */
   void close() throws IOException;
-
+  
   /** 
    * Indicate that a journal is cannot be used to load a certain range of 
    * edits.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
index 2aaf86c..d0ef373 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
@@ -20,7 +20,10 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
+import java.util.Comparator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.SortedSet;
 
@@ -32,11 +35,13 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.ComparisonChain;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableListMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimaps;
 import com.google.common.collect.Sets;
+import com.google.common.collect.TreeMultiset;
 
 /**
  * Manages a collection of Journals. None of the methods are synchronized, it is
@@ -48,6 +53,17 @@
 
   static final Log LOG = LogFactory.getLog(FSEditLog.class);
   
+  static final public Comparator<EditLogInputStream>
+    EDIT_LOG_INPUT_STREAM_COMPARATOR = new Comparator<EditLogInputStream>() {
+      @Override
+      public int compare(EditLogInputStream a, EditLogInputStream b) {
+        return ComparisonChain.start().
+          compare(a.getFirstTxId(), b.getFirstTxId()).
+          compare(b.getLastTxId(), a.getLastTxId()).
+          result();
+      }
+    };
+  
   /**
    * Container for a JournalManager paired with its currently
    * active stream.
@@ -195,75 +211,57 @@
     }, "close journal");
   }
 
-  
   /**
-   * Find the best editlog input stream to read from txid.
-   * If a journal throws an CorruptionException while reading from a txn id,
-   * it means that it has more transactions, but can't find any from fromTxId. 
-   * If this is the case and no other journal has transactions, we should throw
-   * an exception as it means more transactions exist, we just can't load them.
-   *
-   * @param fromTxnId Transaction id to start from.
-   * @return A edit log input stream with tranactions fromTxId 
-   *         or null if no more exist
+   * In this method, we gather streams from all of our JournalManager
+   * objects and add them to the given collection one by one.
+   * 
+   * @param streams          The collection to add the streams to.  It may or 
+   *                         may not be sorted-- this is up to the caller.
+   * @param fromTxId         The transaction ID to start looking for streams at
+   * @param inProgressOk     Should we consider unfinalized streams?
    */
   @Override
-  public EditLogInputStream getInputStream(long fromTxnId, boolean inProgressOk)
-      throws IOException {
-    JournalManager bestjm = null;
-    long bestjmNumTxns = 0;
-    CorruptionException corruption = null;
-
+  public void selectInputStreams(Collection<EditLogInputStream> streams,
+      long fromTxId, boolean inProgressOk) {
+    final TreeMultiset<EditLogInputStream> allStreams =
+        TreeMultiset.create(EDIT_LOG_INPUT_STREAM_COMPARATOR);
     for (JournalAndStream jas : journals) {
-      if (jas.isDisabled()) continue;
-      
-      JournalManager candidate = jas.getManager();
-      long candidateNumTxns = 0;
-      try {
-        candidateNumTxns = candidate.getNumberOfTransactions(fromTxnId,
-            inProgressOk);
-      } catch (CorruptionException ce) {
-        corruption = ce;
-      } catch (IOException ioe) {
-        LOG.warn("Unable to read input streams from JournalManager " + candidate,
-            ioe);
-        continue; // error reading disk, just skip
-      }
-      
-      if (candidateNumTxns > bestjmNumTxns) {
-        bestjm = candidate;
-        bestjmNumTxns = candidateNumTxns;
-      }
-    }
-    
-    if (bestjm == null) {
-      if (corruption != null) {
-        throw new IOException("No non-corrupt logs for txid " 
-                                        + fromTxnId, corruption);
-      } else {
-        return null;
-      }
-    }
-    return bestjm.getInputStream(fromTxnId, inProgressOk);
-  }
-  
-  @Override
-  public long getNumberOfTransactions(long fromTxnId, boolean inProgressOk)
-      throws IOException {
-    long num = 0;
-    for (JournalAndStream jas: journals) {
       if (jas.isDisabled()) {
         LOG.info("Skipping jas " + jas + " since it's disabled");
         continue;
+      }
+      jas.getManager().selectInputStreams(allStreams, fromTxId, inProgressOk);
+    }
+    // We want to group together all the streams that start on the same start
+    // transaction ID.  To do this, we maintain an accumulator (acc) of all
+    // the streams we've seen at a given start transaction ID.  When we see a
+    // higher start transaction ID, we select a stream from the accumulator and
+    // clear it.  Then we begin accumulating streams with the new, higher start
+    // transaction ID.
+    LinkedList<EditLogInputStream> acc =
+        new LinkedList<EditLogInputStream>();
+    for (EditLogInputStream elis : allStreams) {
+      if (acc.isEmpty()) {
+        acc.add(elis);
       } else {
-        long newNum = jas.getManager().getNumberOfTransactions(fromTxnId,
-            inProgressOk);
-        if (newNum > num) {
-          num = newNum;
+        long accFirstTxId = acc.get(0).getFirstTxId();
+        if (accFirstTxId == elis.getFirstTxId()) {
+          acc.add(elis);
+        } else if (accFirstTxId < elis.getFirstTxId()) {
+          streams.add(acc.get(0));
+          acc.clear();
+          acc.add(elis);
+        } else if (accFirstTxId > elis.getFirstTxId()) {
+          throw new RuntimeException("sorted set invariants violated!  " +
+              "Got stream with first txid " + elis.getFirstTxId() +
+              ", but the last firstTxId was " + accFirstTxId);
         }
       }
     }
-    return num;
+    if (!acc.isEmpty()) {
+      streams.add(acc.get(0));
+      acc.clear();
+    }
   }
 
   /**
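
The comparator and accumulator logic above boils down to: sort every stream by first txid ascending and last txid descending, then keep only the first (longest) stream in each group sharing a first txid. Below is a standalone sketch, not part of the patch, that reproduces that selection over a plain value class instead of EditLogInputStream, and with java.util sorting instead of Guava's TreeMultiset; unlike the real code it relies on the sort rather than throwing on an out-of-order stream.

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;

/**
 * Illustrative sketch, not part of the patch: the ordering and grouping used
 * by JournalSet#selectInputStreams, over a plain value class so it runs on
 * its own.
 */
public class SelectStreamsSketch {
  static class Seg {                       // stand-in for an edit log stream
    final long firstTxId, lastTxId;
    Seg(long first, long last) { firstTxId = first; lastTxId = last; }
    @Override public String toString() { return "[" + firstTxId + "," + lastTxId + "]"; }
  }

  public static void main(String[] args) {
    List<Seg> all = new ArrayList<Seg>();
    all.add(new Seg(1, 50));               // two journals report a segment at txid 1...
    all.add(new Seg(1, 100));              // ...this one covers more transactions
    all.add(new Seg(101, 150));

    // Same ordering as EDIT_LOG_INPUT_STREAM_COMPARATOR:
    // first txid ascending, then last txid descending.
    Collections.sort(all, new Comparator<Seg>() {
      @Override
      public int compare(Seg a, Seg b) {
        int c = Long.compare(a.firstTxId, b.firstTxId);
        return c != 0 ? c : Long.compare(b.lastTxId, a.lastTxId);
      }
    });

    // Accumulate streams sharing a first txid; keep the first (longest) of each group.
    List<Seg> selected = new ArrayList<Seg>();
    LinkedList<Seg> acc = new LinkedList<Seg>();
    for (Seg s : all) {
      if (acc.isEmpty() || acc.get(0).firstTxId == s.firstTxId) {
        acc.add(s);
      } else {
        selected.add(acc.get(0));
        acc.clear();
        acc.add(s);
      }
    }
    if (!acc.isEmpty()) {
      selected.add(acc.get(0));
    }
    System.out.println(selected);          // [[1,100], [101,150]]
  }
}
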
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index a7bb21d..ccd942c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -31,6 +31,7 @@
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -58,6 +59,7 @@
 import com.google.common.base.Preconditions;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 
 /**
  * NNStorage is responsible for management of the StorageDirectories used by
@@ -1076,13 +1078,14 @@
    * inspected each directory.
    * 
    * <b>Note:</b> this can mutate the storage info fields (ctime, version, etc).
-   * @throws IOException if no valid storage dirs are found
+   * @throws IOException if no valid storage dirs are found or no valid layout version
    */
   FSImageStorageInspector readAndInspectDirs()
       throws IOException {
-    int minLayoutVersion = Integer.MAX_VALUE; // the newest
-    int maxLayoutVersion = Integer.MIN_VALUE; // the oldest
-    
+    Integer layoutVersion = null;
+    boolean multipleLV = false;
+    StringBuilder layoutVersions = new StringBuilder();
+
     // First determine what range of layout versions we're going to inspect
     for (Iterator<StorageDirectory> it = dirIterator();
          it.hasNext();) {
@@ -1092,24 +1095,29 @@
         continue;
       }
       readProperties(sd); // sets layoutVersion
-      minLayoutVersion = Math.min(minLayoutVersion, getLayoutVersion());
-      maxLayoutVersion = Math.max(maxLayoutVersion, getLayoutVersion());
+      int lv = getLayoutVersion();
+      if (layoutVersion == null) {
+        layoutVersion = Integer.valueOf(lv);
+      } else if (!layoutVersion.equals(lv)) {
+        multipleLV = true;
+      }
+      layoutVersions.append("(").append(sd.getRoot()).append(", ").append(lv).append(") ");
     }
     
-    if (minLayoutVersion > maxLayoutVersion) {
+    if (layoutVersion == null) {
       throw new IOException("No storage directories contained VERSION information");
     }
-    assert minLayoutVersion <= maxLayoutVersion;
-    
-    // If we have any storage directories with the new layout version
+    if (multipleLV) {            
+      throw new IOException(
+          "Storage directories containe multiple layout versions: "
+              + layoutVersions);
+    }
+    // If the storage directories are using the new layout version
     // (ie edits_<txnid>) then use the new inspector, which will ignore
     // the old format dirs.
     FSImageStorageInspector inspector;
-    if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, minLayoutVersion)) {
+    if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
       inspector = new FSImageTransactionalStorageInspector();
-      if (!LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, maxLayoutVersion)) {
-        FSImage.LOG.warn("Ignoring one or more storage directories with old layouts");
-      }
     } else {
       inspector = new FSImagePreTransactionalStorageInspector();
     }
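
readAndInspectDirs now insists that every inspected directory report the same layout version instead of tolerating a min/max range. The standalone sketch below, not part of the patch, applies the same check to a plain map of storage directory to layout version; the directory names and version number in main are made up for illustration.

import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Illustrative sketch, not part of the patch: the "all directories must agree
 * on one layout version" rule that readAndInspectDirs now enforces.
 */
public class LayoutVersionCheckSketch {
  static int requireSingleLayoutVersion(Map<String, Integer> dirToLayoutVersion) {
    Integer layoutVersion = null;
    boolean multipleLV = false;
    StringBuilder layoutVersions = new StringBuilder();
    for (Map.Entry<String, Integer> e : dirToLayoutVersion.entrySet()) {
      int lv = e.getValue();
      if (layoutVersion == null) {
        layoutVersion = Integer.valueOf(lv);
      } else if (!layoutVersion.equals(lv)) {
        multipleLV = true;                 // mixed versions are now a hard error
      }
      layoutVersions.append("(").append(e.getKey()).append(", ").append(lv).append(") ");
    }
    if (layoutVersion == null) {
      throw new IllegalStateException("No storage directories contained VERSION information");
    }
    if (multipleLV) {
      throw new IllegalStateException(
          "Storage directories contain multiple layout versions: " + layoutVersions);
    }
    return layoutVersion;
  }

  public static void main(String[] args) {
    Map<String, Integer> dirs = new LinkedHashMap<String, Integer>();
    dirs.put("/data/nn1/current", -40);    // hypothetical directories and versions
    dirs.put("/data/nn2/current", -40);
    System.out.println(requireSingleLayoutVersion(dirs));  // -40
  }
}
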
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 56ba8a2..6416db1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.ServiceFailedException;
@@ -69,6 +70,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
@@ -145,17 +147,25 @@
   }
   
   /**
-   * HDFS federation configuration can have two types of parameters:
+   * HDFS configuration can have three types of parameters:
    * <ol>
-   * <li>Parameter that is common for all the name services in the cluster.</li>
-   * <li>Parameters that are specific to a name service. This keys are suffixed
+   * <li>Parameters that are common for all the name services in the cluster.</li>
+   * <li>Parameters that are specific to a name service. These keys are suffixed
    * with nameserviceId in the configuration. For example,
    * "dfs.namenode.rpc-address.nameservice1".</li>
+   * <li>Parameters that are specific to a single name node. These keys are suffixed
+   * with nameserviceId and namenodeId in the configuration. For example,
+   * "dfs.namenode.rpc-address.nameservice1.namenode1".</li>
    * </ol>
    * 
-   * Following are nameservice specific keys.
+   * In the latter cases, operators may specify the configuration without
+   * any suffix, with a nameservice suffix, or with a nameservice and namenode
+   * suffix. The more specific suffix will take precedence.
+   * 
+   * These keys are specific to a given namenode, and thus may be configured
+   * globally, for a nameservice, or for a specific namenode within a nameservice.
    */
-  public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
+  public static final String[] NAMENODE_SPECIFIC_KEYS = {
     DFS_NAMENODE_RPC_ADDRESS_KEY,
     DFS_NAMENODE_NAME_DIR_KEY,
     DFS_NAMENODE_EDITS_DIR_KEY,
@@ -170,8 +180,19 @@
     DFS_NAMENODE_BACKUP_ADDRESS_KEY,
     DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
     DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY,
+    DFS_NAMENODE_USER_NAME_KEY,
     DFS_HA_FENCE_METHODS_KEY,
-    DFS_NAMENODE_USER_NAME_KEY
+    DFS_HA_ZKFC_PORT_KEY,
+    DFS_HA_FENCE_METHODS_KEY
+  };
+  
+  /**
+   * @see #NAMENODE_SPECIFIC_KEYS
+   * These keys are specific to a nameservice, but may not be overridden
+   * for a specific namenode.
+   */
+  public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
+    DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
   
   public long getProtocolVersion(String protocol, 
@@ -206,6 +227,7 @@
   private final boolean haEnabled;
   private final HAContext haContext;
   protected boolean allowStaleStandbyReads;
+  private Runtime runtime = Runtime.getRuntime();
 
   
   /** httpServer */
@@ -481,11 +503,16 @@
   }
   
   private void startTrashEmptier(Configuration conf) throws IOException {
-    long trashInterval 
-      = conf.getLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, 
-                     CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
-    if(trashInterval == 0)
+    long trashInterval = conf.getLong(
+        CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY,
+        CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
+    if (trashInterval == 0) {
       return;
+    } else if (trashInterval < 0) {
+      throw new IOException("Cannot start tresh emptier with negative interval."
+          + " Set " + CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY + " to a"
+          + " positive value.");
+    }
     this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
     this.emptier.setDaemon(true);
     this.emptier.start();
@@ -1132,15 +1159,18 @@
     if ((nameserviceId != null && !nameserviceId.isEmpty()) || 
         (namenodeId != null && !namenodeId.isEmpty())) {
       if (nameserviceId != null) {
-        conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
+        conf.set(DFS_NAMESERVICE_ID, nameserviceId);
       }
       if (namenodeId != null) {
         conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
       }
       
       DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
+          NAMENODE_SPECIFIC_KEYS);
+      DFSUtil.setGenericConf(conf, nameserviceId, null,
           NAMESERVICE_SPECIFIC_KEYS);
     }
+    
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@@ -1235,14 +1265,37 @@
     }
     return state.getServiceState();
   }
+  
+  @VisibleForTesting
+  public synchronized void setRuntimeForTesting(Runtime runtime) {
+    this.runtime = runtime;
+  }
 
   /**
-   * Class used as expose {@link NameNode} as context to {@link HAState}
+   * Shutdown the NN immediately in an ungraceful way. Used when it would be
+   * unsafe for the NN to continue operating, e.g. during a failed HA state
+   * transition.
    * 
-   * TODO(HA):
-   * When entering and exiting state, on failing to start services,
-   * appropriate action is needed todo either shutdown the node or recover
-   * from failure.
+   * @param t exception which warrants the shutdown. Printed to the NN log
+   *          before exit.
+   * @throws ServiceFailedException thrown only for testing.
+   */
+  private synchronized void doImmediateShutdown(Throwable t)
+      throws ServiceFailedException {
+    String message = "Error encountered requiring NN shutdown. " +
+        "Shutting down immediately.";
+    try {
+      LOG.fatal(message, t);
+    } catch (Throwable ignored) {
+      // This is unlikely to happen, but there's nothing we can do if it does.
+    }
+    runtime.exit(1);
+    // This code is only reached during testing, when runtime is stubbed out.
+    throw new ServiceFailedException(message, t);
+  }
+  
+  /**
+   * Class used to expose {@link NameNode} as context to {@link HAState}
    */
   protected class NameNodeHAContext implements HAContext {
     @Override
@@ -1257,32 +1310,52 @@
 
     @Override
     public void startActiveServices() throws IOException {
-      namesystem.startActiveServices();
-      startTrashEmptier(conf);
+      try {
+        namesystem.startActiveServices();
+        startTrashEmptier(conf);
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
     }
 
     @Override
     public void stopActiveServices() throws IOException {
-      if (namesystem != null) {
-        namesystem.stopActiveServices();
+      try {
+        if (namesystem != null) {
+          namesystem.stopActiveServices();
+        }
+        stopTrashEmptier();
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
       }
-      stopTrashEmptier();
     }
 
     @Override
     public void startStandbyServices() throws IOException {
-      namesystem.startStandbyServices(conf);
+      try {
+        namesystem.startStandbyServices(conf);
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
     }
 
     @Override
     public void prepareToStopStandbyServices() throws ServiceFailedException {
-      namesystem.prepareToStopStandbyServices();
+      try {
+        namesystem.prepareToStopStandbyServices();
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
     }
     
     @Override
     public void stopStandbyServices() throws IOException {
-      if (namesystem != null) {
-        namesystem.stopStandbyServices();
+      try {
+        if (namesystem != null) {
+          namesystem.stopStandbyServices();
+        }
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
       }
     }
     
@@ -1313,4 +1386,43 @@
   public boolean isStandbyState() {
     return (state.equals(STANDBY_STATE));
   }
+
+  /**
+   * Check that a request to change this node's HA state is valid.
+   * In particular, verifies that, if auto failover is enabled, non-forced
+   * requests from the HAAdmin CLI are rejected, and vice versa.
+   *
+   * @param req the request to check
+   * @throws AccessControlException if the request is disallowed
+   */
+  void checkHaStateChange(StateChangeRequestInfo req)
+      throws AccessControlException {
+    boolean autoHaEnabled = conf.getBoolean(DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
+        DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
+    switch (req.getSource()) {
+    case REQUEST_BY_USER:
+      if (autoHaEnabled) {
+        throw new AccessControlException(
+            "Manual HA control for this NameNode is disallowed, because " +
+            "automatic HA is enabled.");
+      }
+      break;
+    case REQUEST_BY_USER_FORCED:
+      if (autoHaEnabled) {
+        LOG.warn("Allowing manual HA control from " +
+            Server.getRemoteAddress() +
+            " even though automatic HA is enabled, because the user " +
+            "specified the force flag");
+      }
+      break;
+    case REQUEST_BY_ZKFC:
+      if (!autoHaEnabled) {
+        throw new AccessControlException(
+            "Request from ZK failover controller at " +
+            Server.getRemoteAddress() + " denied since automatic HA " +
+            "is not enabled"); 
+      }
+      break;
+    }
+  }
 }
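
The expanded javadoc on NAMENODE_SPECIFIC_KEYS describes three levels of configuration keys, with the most specific suffix taking precedence. The standalone sketch below, not part of the patch, shows that precedence over a plain map; the lookup order is an assumption for illustration, not the actual DFSUtil.setGenericConf implementation, and the host names are made up.

import java.util.HashMap;
import java.util.Map;

/**
 * Illustrative sketch, not part of the patch: a key may be set bare, with a
 * nameservice suffix, or with a nameservice plus namenode suffix, and the
 * most specific form wins.
 */
public class SuffixedKeySketch {
  static String resolve(Map<String, String> conf, String key, String nsId, String nnId) {
    String value = conf.get(key + "." + nsId + "." + nnId);  // namenode-specific first
    if (value == null) {
      value = conf.get(key + "." + nsId);                    // then nameservice-specific
    }
    if (value == null) {
      value = conf.get(key);                                 // then the global setting
    }
    return value;
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<String, String>();
    conf.put("dfs.namenode.rpc-address", "any-host:8020");
    conf.put("dfs.namenode.rpc-address.nameservice1", "ns-host:8020");
    conf.put("dfs.namenode.rpc-address.nameservice1.namenode1", "nn1-host:8020");
    System.out.println(
        resolve(conf, "dfs.namenode.rpc-address", "nameservice1", "namenode1"));
    // prints nn1-host:8020, since the namenode-specific value shadows the others
  }
}
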
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 2e62b8a..2798a83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -165,9 +165,9 @@
       httpServer.setAttribute("datanode.https.port", datanodeSslPort
         .getPort());
     }
-    httpServer.setAttribute("name.node", nn);
-    httpServer.setAttribute("name.node.address", bindAddress);
-    httpServer.setAttribute("name.system.image", nn.getFSImage());
+    httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
+    httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY, nn.getNameNodeAddress());
+    httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, nn.getFSImage());
     httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
     setupServlets(httpServer, conf);
     httpServer.start();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index b61fbe8..426f578 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -50,7 +50,6 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -712,10 +711,16 @@
 
   @Override // NamenodeProtocol
   public long getTransactionID() throws IOException {
-    namesystem.checkOperation(OperationCategory.CHECKPOINT);
-    return namesystem.getEditLog().getSyncTxId();
+    namesystem.checkOperation(OperationCategory.UNCHECKED);
+    return namesystem.getFSImage().getLastAppliedOrWrittenTxId();
   }
-
+  
+  @Override // NamenodeProtocol
+  public long getMostRecentCheckpointTxId() throws IOException {
+    namesystem.checkOperation(OperationCategory.UNCHECKED);
+    return namesystem.getFSImage().getMostRecentCheckpointTxId();
+  }
+  
   @Override // NamenodeProtocol
   public CheckpointSignature rollEditLog() throws IOException {
     return namesystem.rollEditLog();
@@ -972,14 +977,16 @@
   }
   
   @Override // HAServiceProtocol
-  public synchronized void transitionToActive() 
+  public synchronized void transitionToActive(StateChangeRequestInfo req) 
       throws ServiceFailedException, AccessControlException {
+    nn.checkHaStateChange(req);
     nn.transitionToActive();
   }
   
   @Override // HAServiceProtocol
-  public synchronized void transitionToStandby() 
+  public synchronized void transitionToStandby(StateChangeRequestInfo req) 
       throws ServiceFailedException, AccessControlException {
+    nn.checkHaStateChange(req);
     nn.transitionToStandby();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 7cb868b..2100b2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -53,6 +53,8 @@
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * This class provides rudimentary checking of DFS volumes for errors and
  * sub-optimal conditions.
@@ -244,7 +246,8 @@
     out.println();
   }
   
-  private void check(String parent, HdfsFileStatus file, Result res) throws IOException {
+  @VisibleForTesting
+  void check(String parent, HdfsFileStatus file, Result res) throws IOException {
     String path = file.getFullName(parent);
     boolean isOpen = false;
 
@@ -313,6 +316,7 @@
       DatanodeInfo[] locs = lBlk.getLocations();
       res.totalReplicas += locs.length;
       short targetFileReplication = file.getReplication();
+      res.numExpectedReplicas += targetFileReplication;
       if (locs.length > targetFileReplication) {
         res.excessiveReplicas += (locs.length - targetFileReplication);
         res.numOverReplicatedBlocks += 1;
@@ -608,29 +612,31 @@
   /**
    * FsckResult of checking, plus overall DFS statistics.
    */
-  private static class Result {
-    private List<String> missingIds = new ArrayList<String>();
-    private long missingSize = 0L;
-    private long corruptFiles = 0L;
-    private long corruptBlocks = 0L;
-    private long excessiveReplicas = 0L;
-    private long missingReplicas = 0L;
-    private long numOverReplicatedBlocks = 0L;
-    private long numUnderReplicatedBlocks = 0L;
-    private long numMisReplicatedBlocks = 0L;  // blocks that do not satisfy block placement policy
-    private long numMinReplicatedBlocks = 0L;  // minimally replicatedblocks
-    private long totalBlocks = 0L;
-    private long totalOpenFilesBlocks = 0L;
-    private long totalFiles = 0L;
-    private long totalOpenFiles = 0L;
-    private long totalDirs = 0L;
-    private long totalSize = 0L;
-    private long totalOpenFilesSize = 0L;
-    private long totalReplicas = 0L;
+  @VisibleForTesting
+  static class Result {
+    List<String> missingIds = new ArrayList<String>();
+    long missingSize = 0L;
+    long corruptFiles = 0L;
+    long corruptBlocks = 0L;
+    long excessiveReplicas = 0L;
+    long missingReplicas = 0L;
+    long numOverReplicatedBlocks = 0L;
+    long numUnderReplicatedBlocks = 0L;
+    long numMisReplicatedBlocks = 0L;  // blocks that do not satisfy block placement policy
+    long numMinReplicatedBlocks = 0L;  // minimally replicated blocks
+    long totalBlocks = 0L;
+    long numExpectedReplicas = 0L;
+    long totalOpenFilesBlocks = 0L;
+    long totalFiles = 0L;
+    long totalOpenFiles = 0L;
+    long totalDirs = 0L;
+    long totalSize = 0L;
+    long totalOpenFilesSize = 0L;
+    long totalReplicas = 0L;
 
     final short replication;
     
-    private Result(Configuration conf) {
+    Result(Configuration conf) {
       this.replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
                                             DFSConfigKeys.DFS_REPLICATION_DEFAULT);
     }
@@ -726,7 +732,7 @@
               missingReplicas);
       if (totalReplicas > 0) {
         res.append(" (").append(
-            ((float) (missingReplicas * 100) / (float) totalReplicas)).append(
+            ((float) (missingReplicas * 100) / (float) numExpectedReplicas)).append(
             " %)");
       }
       return res.toString();
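
The fsck change above adds numExpectedReplicas and divides the missing-replica percentage by it rather than by totalReplicas, which only counts the replicas that actually exist. A standalone sketch of the arithmetic, not part of the patch, for a single block with target replication 3 and two live replicas:

/**
 * Illustrative sketch, not part of the patch: why the missing-replica
 * percentage now divides by numExpectedReplicas.
 */
public class MissingReplicaPercentSketch {
  public static void main(String[] args) {
    long missingReplicas = 1, totalReplicas = 2, numExpectedReplicas = 3;
    // Old formula: divides by the replicas that exist, overstating the gap.
    System.out.println((float) (missingReplicas * 100) / (float) totalReplicas);        // 50.0
    // New formula: divides by the replicas that should exist.
    System.out.println((float) (missingReplicas * 100) / (float) numExpectedReplicas);  // about 33.3
  }
}
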
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index f284aaa..f5eae8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -67,16 +67,16 @@
       return "";
     return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
   }
-  
+
   /**
    * returns security mode of the cluster (namenode)
    * @return "on" if security is on, and "off" otherwise
    */
-  static String getSecurityModeText() {  
+  static String getSecurityModeText() {
     if(UserGroupInformation.isSecurityEnabled()) {
-      return "Security is <em>ON</em> <br>";
+      return "<div class=\"security\">Security is <em>ON</em></div>";
     } else {
-      return "Security is <em>OFF</em> <br>";
+      return "<div class=\"security\">Security is <em>OFF</em></div>";
     }
   }
 
@@ -99,22 +99,22 @@
     long used = (totalMemory * 100) / commitedMemory;
     long usedNonHeap = (totalNonHeap * 100) / commitedNonHeap;
 
-    String str = inodes + " files and directories, " + blocks + " blocks = "
+    String str = "<div>" + inodes + " files and directories, " + blocks + " blocks = "
         + (inodes + blocks) + " total";
     if (maxobjects != 0) {
       long pct = ((inodes + blocks) * 100) / maxobjects;
       str += " / " + maxobjects + " (" + pct + "%)";
     }
-    str += ".<br>";
-    str += "Heap Memory used " + StringUtils.byteDesc(totalMemory) + " is "
-        + " " + used + "% of Commited Heap Memory " 
+    str += ".</div>";
+    str += "<div>Heap Memory used " + StringUtils.byteDesc(totalMemory) + " is "
+        + " " + used + "% of Commited Heap Memory "
         + StringUtils.byteDesc(commitedMemory)
         + ". Max Heap Memory is " + StringUtils.byteDesc(maxMemory) +
-        ". <br>";
-    str += "Non Heap Memory used " + StringUtils.byteDesc(totalNonHeap) + " is"
+        ". </div>";
+    str += "<div>Non Heap Memory used " + StringUtils.byteDesc(totalNonHeap) + " is"
         + " " + usedNonHeap + "% of " + " Commited Non Heap Memory "
         + StringUtils.byteDesc(commitedNonHeap) + ". Max Non Heap Memory is "
-        + StringUtils.byteDesc(maxNonHeap) + ".<br>";
+        + StringUtils.byteDesc(maxNonHeap) + ".</div>";
     return str;
   }
 
@@ -133,19 +133,17 @@
 
   /** Return a table containing version information. */
   static String getVersionTable(FSNamesystem fsn) {
-    return "<div id='dfstable'><table>"
-        + "\n  <tr><td id='col1'>Started:</td><td>" + fsn.getStartTime()
-        + "</td></tr>\n" + "\n  <tr><td id='col1'>Version:</td><td>"
+    return "<div class='dfstable'><table>"
+        + "\n  <tr><td class='col1'>Started:</td><td>" + fsn.getStartTime()
+        + "</td></tr>\n" + "\n  <tr><td class='col1'>Version:</td><td>"
         + VersionInfo.getVersion() + ", " + VersionInfo.getRevision()
-        + "\n  <tr><td id='col1'>Compiled:</td><td>" + VersionInfo.getDate()
+        + "</td></tr>\n" + "\n  <tr><td class='col1'>Compiled:</td><td>" + VersionInfo.getDate()
         + " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch()
-        + "\n  <tr><td id='col1'>Upgrades:</td><td>"
-        + getUpgradeStatusText(fsn) 
-        + "\n  <tr><td id='col1'>Cluster ID:</td><td>" + fsn.getClusterId()
-        + "</td></tr>\n" 
-        + "\n  <tr><td id='col1'>Block Pool ID:</td><td>" + fsn.getBlockPoolId()
-        + "</td></tr>\n" 
-        + "\n</table></div>";
+        + "</td></tr>\n  <tr><td class='col1'>Upgrades:</td><td>"
+        + getUpgradeStatusText(fsn)
+        + "</td></tr>\n  <tr><td class='col1'>Cluster ID:</td><td>" + fsn.getClusterId()
+        + "</td></tr>\n  <tr><td class='col1'>Block Pool ID:</td><td>" + fsn.getBlockPoolId()
+        + "</td></tr>\n</table></div>";
   }
 
   /**
@@ -157,14 +155,15 @@
     if (missingBlocks > 0) {
       StringBuilder result = new StringBuilder();
 
-      // Warning class is typically displayed in RED
-      result.append("<br/><a class=\"warning\" href=\"/corrupt_files.jsp\" title=\"List corrupt files\">\n");
+      // Warning class is typically displayed in RED.
+      result.append("<div>"); // opening tag of outer <div>.
+      result.append("<a class=\"warning\" href=\"/corrupt_files.jsp\" title=\"List corrupt files\">\n");
       result.append("<b>WARNING : There are " + missingBlocks
           + " missing blocks. Please check the logs or run fsck in order to identify the missing blocks.</b>");
       result.append("</a>");
 
-      result.append("<br/><div class=\"small\">See the Hadoop FAQ for common causes and potential solutions.");
-      result.append("<br/><br/>\n");
+      result.append("<div class=\"small\">See the Hadoop FAQ for common causes and potential solutions.</div>");
+      result.append("</div>\n"); // closing tag of outer <div>.
 
       return result.toString();
     }
@@ -205,11 +204,11 @@
 
       // FS Image storage configuration
       out.print("<h3> " + nn.getRole() + " Storage: </h3>");
-      out.print("<div id=\"dfstable\"> <table border=1 cellpadding=10 cellspacing=0 title=\"NameNode Storage\">\n"
+      out.print("<div class=\"dfstable\"> <table class=\"storage\" title=\"NameNode Storage\">\n"
               + "<thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>");
 
       StorageDirectory st = null;
-      for (Iterator<StorageDirectory> it 
+      for (Iterator<StorageDirectory> it
              = fsImage.getStorage().dirIterator(); it.hasNext();) {
         st = it.next();
         String dir = "" + st.getRoot();
@@ -224,10 +223,10 @@
         String dir = "" + st.getRoot();
         String type = "" + st.getStorageDirType();
         out.print("<tr><td>" + dir + "</td><td>" + type
-            + "</td><td><font color=red>Failed</font></td></tr>");
+            + "</td><td><span class=\"failed\">Failed</span></td></tr>");
       }
 
-      out.print("</table></div><br>\n");
+      out.print("</table></div>\n");
     }
 
     void generateHealthReport(JspWriter out, NameNode nn,
@@ -320,7 +319,7 @@
               + "Number of Under-Replicated Blocks" + colTxt() + ":" + colTxt()
               + fsn.getBlockManager().getUnderReplicatedNotMissingBlocks(); 
       }
-      out.print("<div id=\"dfstable\"> <table>\n" + rowTxt() + colTxt()
+      out.print("<div class=\"dfstable\"> <table>\n" + rowTxt() + colTxt()
           + "Configured Capacity" + colTxt() + ":" + colTxt()
           + StringUtils.byteDesc(total) + rowTxt() + colTxt() + "DFS Used"
           + colTxt() + ":" + colTxt() + StringUtils.byteDesc(used) + rowTxt()
@@ -359,7 +358,7 @@
           + "</table></div><br>\n");
 
       if (live.isEmpty() && dead.isEmpty()) {
-        out.print("There are no datanodes in the cluster");
+        out.print("There are no datanodes in the cluster.");
       }
     }
   }
@@ -587,6 +586,10 @@
 
       whatNodes = request.getParameter("whatNodes"); // show only live or only
                                                      // dead nodes
+      if (null == whatNodes || whatNodes.isEmpty()) {
+        out.print("Invalid input");
+        return;
+      }
       sorterField = request.getParameter("sorter/field");
       sorterOrder = request.getParameter("sorter/order");
       if (sorterField == null)
@@ -629,7 +632,7 @@
         if (whatNodes.equals("LIVE")) {
           out.print("<a name=\"LiveNodes\" id=\"title\">" + "Live Datanodes : "
               + live.size() + "</a>"
-              + "<br><br>\n<table border=1 cellspacing=0>\n");
+              + "<br><br>\n<table class=\"nodes\">\n");
 
           counterReset();
 
@@ -714,6 +717,8 @@
             }
             out.print("</table>\n");
           }
+        } else {
+          out.print("Invalid input");
         }
         out.print("</div>");
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java
index c5c0c06..67ee88e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java
@@ -23,6 +23,7 @@
 import java.util.concurrent.CountDownLatch;
 
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.util.Canceler;
 
 import com.google.common.base.Preconditions;
 
@@ -36,20 +37,17 @@
   private final long txid;
   private final List<StorageDirectory> errorSDs =
     Collections.synchronizedList(new ArrayList<StorageDirectory>());
-
-  /**
-   * If the operation has been canceled, set to the reason why
-   * it has been canceled (eg standby moving to active)
-   */
-  private volatile String cancelReason = null;
   
+  private final Canceler canceller;
   private CountDownLatch completionLatch = new CountDownLatch(1);
-  
+
   SaveNamespaceContext(
       FSNamesystem sourceNamesystem,
-      long txid) {
+      long txid,
+      Canceler canceller) {
     this.sourceNamesystem = sourceNamesystem;
     this.txid = txid;
+    this.canceller = canceller;
   }
 
   FSNamesystem getSourceNamesystem() {
@@ -68,17 +66,6 @@
     return errorSDs;
   }
 
-  /**
-   * Requests that the current saveNamespace operation be
-   * canceled if it is still running.
-   * @param reason the reason why cancellation is requested
-   * @throws InterruptedException 
-   */
-  void cancel(String reason) throws InterruptedException {
-    this.cancelReason = reason;
-    completionLatch.await();
-  }
-  
   void markComplete() {
     Preconditions.checkState(completionLatch.getCount() == 1,
         "Context already completed!");
@@ -86,13 +73,9 @@
   }
 
   void checkCancelled() throws SaveNamespaceCancelledException {
-    if (cancelReason != null) {
+    if (canceller.isCancelled()) {
       throw new SaveNamespaceCancelledException(
-          cancelReason);
+          canceller.getCancellationReason());
     }
   }
-
-  boolean isCancelled() {
-    return cancelReason != null;
-  }
 }
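
SaveNamespaceContext now delegates cancellation to a shared Canceler rather than tracking its own cancelReason. The standalone sketch below, not part of the patch, shows an object with the same shape (cancel, isCancelled, getCancellationReason) matching the calls made above; the real class is org.apache.hadoop.hdfs.util.Canceler and may differ in detail.

/**
 * Illustrative sketch, not part of the patch: a minimal cancellation token.
 * One thread sets a reason; any number of workers poll it.
 */
public class CancelerSketch {
  private volatile String cancelReason = null;

  public void cancel(String reason) {
    this.cancelReason = reason;
  }

  public boolean isCancelled() {
    return cancelReason != null;
  }

  public String getCancellationReason() {
    return cancelReason;
  }

  public static void main(String[] args) {
    CancelerSketch canceler = new CancelerSketch();
    // A long-running save would poll isCancelled() between units of work
    // and raise a cancellation exception when it turns true.
    canceler.cancel("About to exit standby state");
    System.out.println(canceler.isCancelled() + ": " + canceler.getCancellationReason());
  }
}
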
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamLimiter.java
similarity index 69%
copy from hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java
copy to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamLimiter.java
index 6177c79..9742082 100644
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamLimiter.java
@@ -15,13 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.hadoop.hdfs.server.namenode;
 
-package org.apache.hadoop.util;
-
-public interface RemoteExecution {
-  public void executeCommand (String remoteHostName, String user,
-          String  command) throws Exception;
-  public int getExitCode();
-  public String getOutput();
-  public String getCommandString();
-}
+/**
+ * An object that allows you to set a limit on a stream.  This limit
+ * represents the number of bytes that can be read without getting an
+ * exception.
+ */
+interface StreamLimiter {
+  /**
+   * Set a limit.  Calling this function clears any existing limit.
+   */
+  public void setLimit(long limit);
+}
\ No newline at end of file
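
StreamLimiter only specifies setLimit(long); how a stream enforces the limit is left to the implementation. The standalone sketch below is not part of the patch and does not implement the package-private interface itself; it shows one way a wrapped InputStream could honor that contract, by failing any read past the current limit.

import java.io.ByteArrayInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

/**
 * Illustrative sketch, not part of the patch: a byte-counting wrapper that
 * throws once reads would exceed the configured limit.
 */
class LimitedInputStreamSketch extends FilterInputStream {
  private long remaining = Long.MAX_VALUE;

  LimitedInputStreamSketch(InputStream in) {
    super(in);
  }

  /** Sets a new limit, clearing any previous one. */
  public void setLimit(long limit) {
    remaining = limit;
  }

  @Override
  public int read() throws IOException {
    if (remaining <= 0) {
      throw new IOException("Tried to read past the configured limit");
    }
    int b = super.read();
    if (b >= 0) {
      remaining--;
    }
    return b;
  }

  public static void main(String[] args) throws IOException {
    LimitedInputStreamSketch in =
        new LimitedInputStreamSketch(new ByteArrayInputStream(new byte[] {1, 2, 3}));
    in.setLimit(2);
    System.out.println(in.read());  // 1
    System.out.println(in.read());  // 2
    try {
      in.read();                    // third byte exceeds the limit
    } catch (IOException expected) {
      System.out.println("limit enforced: " + expected.getMessage());
    }
  }
}
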
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index 773038f..19f986b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -33,16 +33,10 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ha.HAServiceProtocol;
-import org.apache.hadoop.ha.HAServiceStatus;
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -52,10 +46,8 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
-import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
@@ -90,7 +82,7 @@
   // Exit/return codes.
   static final int ERR_CODE_FAILED_CONNECT = 2;
   static final int ERR_CODE_INVALID_VERSION = 3;
-  static final int ERR_CODE_OTHER_NN_NOT_ACTIVE = 4;
+  // Skip 4 - was used in previous versions, but no longer returned.
   static final int ERR_CODE_ALREADY_FORMATTED = 5;
   static final int ERR_CODE_LOGS_UNAVAILABLE = 6; 
 
@@ -142,12 +134,6 @@
         .getProxy();
   }
   
-  private HAServiceProtocol createHAProtocolProxy()
-      throws IOException {
-    return new NNHAServiceTarget(new HdfsConfiguration(conf), nsId, otherNNId)
-        .getProxy(conf, 15000);
-  }
-
   private int doRun() throws IOException {
 
     NamenodeProtocol proxy = createNNProtocolProxy();
@@ -184,29 +170,6 @@
         "           Layout version: " + nsInfo.getLayoutVersion() + "\n" +
         "=====================================================");
 
-    // Ensure the other NN is active - we can't force it to roll edit logs
-    // below if it's not active.
-    if (!isOtherNNActive()) {
-      String err = "NameNode " + nsId + "." + nnId + " at " + otherIpcAddr +
-          " is not currently in ACTIVE state.";
-      if (!interactive) {
-        LOG.fatal(err + " Please transition it to " +
-            "active before attempting to bootstrap a standby node.");
-        return ERR_CODE_OTHER_NN_NOT_ACTIVE;
-      }
-      
-      System.err.println(err);
-      if (ToolRunner.confirmPrompt(
-            "Do you want to automatically transition it to active now?")) {
-        transitionOtherNNActive();
-      } else {
-        LOG.fatal("User aborted. Exiting without bootstrapping standby.");
-        return ERR_CODE_OTHER_NN_NOT_ACTIVE;
-      }
-    }
-    
-
-    
     // Check with the user before blowing away data.
     if (!NameNode.confirmFormat(
             Sets.union(Sets.newHashSet(dirsToFormat),
@@ -214,13 +177,10 @@
             force, interactive)) {
       return ERR_CODE_ALREADY_FORMATTED;
     }
-
-    // Force the active to roll its log
-    CheckpointSignature csig = proxy.rollEditLog();
-    long imageTxId = csig.getMostRecentCheckpointTxId();
-    long rollTxId = csig.getCurSegmentTxId();
-
-
+    
+    long imageTxId = proxy.getMostRecentCheckpointTxId();
+    long curTxId = proxy.getTransactionID();
+    
     // Format the storage (writes VERSION file)
     NNStorage storage = new NNStorage(conf, dirsToFormat, editUrisToFormat);
     storage.format(nsInfo);
@@ -233,11 +193,11 @@
     
     // Ensure that we have enough edits already in the shared directory to
     // start up from the last checkpoint on the active.
-    if (!checkLogsAvailableForRead(image, imageTxId, rollTxId)) {
+    if (!checkLogsAvailableForRead(image, imageTxId, curTxId)) {
       return ERR_CODE_LOGS_UNAVAILABLE;
     }
     
-    image.getStorage().writeTransactionIdFileToStorage(rollTxId);
+    image.getStorage().writeTransactionIdFileToStorage(curTxId);
 
     // Download that checkpoint into our storage directories.
     MD5Hash hash = TransferFsImage.downloadImageToStorage(
@@ -247,32 +207,31 @@
     return 0;
   }
 
-  
-  private void transitionOtherNNActive()
-      throws AccessControlException, ServiceFailedException, IOException {
-    LOG.info("Transitioning the running namenode to active...");
-    createHAProtocolProxy().transitionToActive();    
-    LOG.info("Successful");
-  }
-
   private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
-      long rollTxId) {
-    
+      long curTxIdOnOtherNode) {
+
+    if (imageTxId == curTxIdOnOtherNode) {
+      // The other node hasn't written any logs since the last checkpoint.
+      // This can be the case if the NN was freshly formatted as HA, and
+      // then started in standby mode, so it has no edit logs at all.
+      return true;
+    }
     long firstTxIdInLogs = imageTxId + 1;
-    long lastTxIdInLogs = rollTxId - 1;
-    assert lastTxIdInLogs >= firstTxIdInLogs;
+    
+    assert curTxIdOnOtherNode >= firstTxIdInLogs :
+      "first=" + firstTxIdInLogs + " onOtherNode=" + curTxIdOnOtherNode;
     
     try {
       Collection<EditLogInputStream> streams =
         image.getEditLog().selectInputStreams(
-          firstTxIdInLogs, lastTxIdInLogs, false);
+          firstTxIdInLogs, curTxIdOnOtherNode, null, true);
       for (EditLogInputStream stream : streams) {
         IOUtils.closeStream(stream);
       }
       return true;
     } catch (IOException e) {
       String msg = "Unable to read transaction ids " +
-          firstTxIdInLogs + "-" + lastTxIdInLogs +
+          firstTxIdInLogs + "-" + curTxIdOnOtherNode +
           " from the configured shared edits storage " +
           Joiner.on(",").join(sharedEditsUris) + ". " +
           "Please copy these logs into the shared edits storage " + 
@@ -291,12 +250,6 @@
     return (nsInfo.getLayoutVersion() == HdfsConstants.LAYOUT_VERSION);
   }
   
-  private boolean isOtherNNActive()
-      throws AccessControlException, IOException {
-    HAServiceStatus status = createHAProtocolProxy().getServiceStatus();
-    return status.getState() == HAServiceState.ACTIVE;
-  }
-
   private void parseConfAndFindOtherNN() throws IOException {
     Configuration conf = getConf();
     nsId = DFSUtil.getNamenodeNameServiceId(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index c11f1d7..3733ee0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -201,7 +201,7 @@
       }
       Collection<EditLogInputStream> streams;
       try {
-        streams = editLog.selectInputStreams(lastTxnId + 1, 0, false);
+        streams = editLog.selectInputStreams(lastTxnId + 1, 0, null, false);
       } catch (IOException ioe) {
         // This is acceptable. If we try to tail edits in the middle of an edits
         // log roll, i.e. the last one has been finalized but the new inprogress
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index bbec10c..c3ee0d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceCancelledException;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
+import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -58,12 +59,16 @@
   private final CheckpointerThread thread;
   private String activeNNAddress;
   private InetSocketAddress myNNAddress;
+
+  private Object cancelLock = new Object();
+  private Canceler canceler;
   
   // Keep track of how many checkpoints were canceled.
   // This is for use in tests.
   private static int canceledCount = 0;
   
-  public StandbyCheckpointer(Configuration conf, FSNamesystem ns) {
+  public StandbyCheckpointer(Configuration conf, FSNamesystem ns)
+      throws IOException {
     this.namesystem = ns;
     this.checkpointConf = new CheckpointConf(conf); 
     this.thread = new CheckpointerThread();
@@ -74,8 +79,9 @@
   /**
    * Determine the address of the NN we are checkpointing
    * as well as our own HTTP address from the configuration.
+   * @throws IOException 
    */
-  private void setNameNodeAddresses(Configuration conf) {
+  private void setNameNodeAddresses(Configuration conf) throws IOException {
     // Look up our own address.
     String myAddrString = getHttpAddress(conf);
 
@@ -91,7 +97,7 @@
     myNNAddress = NetUtils.createSocketAddr(myAddrString);
   }
   
-  private String getHttpAddress(Configuration conf) {
+  private String getHttpAddress(Configuration conf) throws IOException {
     String configuredAddr = DFSUtil.getInfoServer(null, conf, false);
     
     // Use the hostname from the RPC address as a default, in case
@@ -112,7 +118,7 @@
    */
   private boolean checkAddress(String addrStr) {
     InetSocketAddress addr = NetUtils.createSocketAddr(addrStr);
-    return addr.getPort() != 0 && !addr.getAddress().isAnyLocalAddress();
+    return addr.getPort() != 0;
   }
 
   public void start() {
@@ -123,6 +129,7 @@
   }
   
   public void stop() throws IOException {
+    cancelAndPreventCheckpoints("Stopping checkpointer");
     thread.setShouldRun(false);
     thread.interrupt();
     try {
@@ -134,6 +141,7 @@
   }
 
   private void doCheckpoint() throws InterruptedException, IOException {
+    assert canceler != null;
     long txid;
     
     namesystem.writeLockInterruptibly();
@@ -153,8 +161,8 @@
             thisCheckpointTxId + ". Skipping...");
         return;
       }
-      
-      img.saveNamespace(namesystem);
+
+      img.saveNamespace(namesystem, canceler);
       txid = img.getStorage().getMostRecentCheckpointTxId();
       assert txid == thisCheckpointTxId : "expected to save checkpoint at txid=" +
         thisCheckpointTxId + " but instead saved at txid=" + txid;
@@ -173,16 +181,18 @@
    * and prevent any new checkpoints from starting for the next
    * minute or so.
    */
-  public void cancelAndPreventCheckpoints() throws ServiceFailedException {
-    try {
-      thread.preventCheckpointsFor(PREVENT_AFTER_CANCEL_MS);
-      // TODO(HA): there is a really narrow race here if we are just
-      // about to start a checkpoint - this won't cancel it!
-      namesystem.getFSImage().cancelSaveNamespace(
-          "About to exit standby state");
-    } catch (InterruptedException e) {
-      throw new ServiceFailedException(
-          "Interrupted while trying to cancel checkpoint");
+  public void cancelAndPreventCheckpoints(String msg) throws ServiceFailedException {
+    thread.preventCheckpointsFor(PREVENT_AFTER_CANCEL_MS);
+    synchronized (cancelLock) {
+      // Before beginning a checkpoint, the checkpointer thread
+      // takes this lock, and creates a canceler object.
+      // If the canceler is non-null, then a checkpoint is in
+      // progress and we need to cancel it. If it's null, then
+      // the operation has not started, meaning that the above
+      // time-based prevention will take effect.
+      if (canceler != null) {
+        canceler.cancel(msg);
+      }
     }
   }
   
@@ -272,10 +282,18 @@
                 "exceeds the configured interval " + checkpointConf.getPeriod());
             needCheckpoint = true;
           }
-          if (needCheckpoint && now < preventCheckpointsUntil) {
-            LOG.info("But skipping this checkpoint since we are about to failover!");
-            canceledCount++;
-          } else if (needCheckpoint) {
+          
+          synchronized (cancelLock) {
+            if (now < preventCheckpointsUntil) {
+              LOG.info("But skipping this checkpoint since we are about to failover!");
+              canceledCount++;
+              continue;
+            }
+            assert canceler == null;
+            canceler = new Canceler();
+          }
+          
+          if (needCheckpoint) {
             doCheckpoint();
             lastCheckpointTime = now;
           }
@@ -287,6 +305,10 @@
           continue;
         } catch (Throwable t) {
           LOG.error("Exception in doCheckpoint", t);
+        } finally {
+          synchronized (cancelLock) {
+            canceler = null;
+          }
         }
       }
     }
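
The StandbyCheckpointer changes coordinate the checkpointer thread and cancelAndPreventCheckpoints through cancelLock: the canceler is only created, cancelled, or cleared while holding that lock, so a cancel request either reaches an in-flight checkpoint or is covered by the time-based prevention window. The standalone sketch below, not part of the patch, shows the same lock discipline; the Canceler stand-in and the 60-second window are assumptions for illustration.

/**
 * Illustrative sketch, not part of the patch: create, cancel, and clear the
 * canceler only under cancelLock, as the checkpointer above does.
 */
public class CheckpointCancelSketch {
  static class Canceler {                    // minimal stand-in for hdfs.util.Canceler
    volatile String reason;
    void cancel(String r) { reason = r; }
  }

  private final Object cancelLock = new Object();
  private Canceler canceler;
  private volatile long preventCheckpointsUntil = 0;

  void cancelAndPreventCheckpoints(String msg) {
    preventCheckpointsUntil = System.currentTimeMillis() + 60 * 1000L;
    synchronized (cancelLock) {
      if (canceler != null) {                // a checkpoint is in flight: cancel it
        canceler.cancel(msg);
      }                                      // otherwise the window above is enough
    }
  }

  void checkpointOnce() {
    try {
      synchronized (cancelLock) {
        if (System.currentTimeMillis() < preventCheckpointsUntil) {
          System.out.println("skipping checkpoint: failover imminent");
          return;
        }
        canceler = new Canceler();
      }
      System.out.println("checkpointing (would poll canceler.reason while saving)");
    } finally {
      synchronized (cancelLock) {
        canceler = null;
      }
    }
  }

  public static void main(String[] args) {
    CheckpointCancelSketch c = new CheckpointCancelSketch();
    c.checkpointOnce();                      // runs normally
    c.cancelAndPreventCheckpoints("about to exit standby state");
    c.checkpointOnce();                      // skipped by the prevention window
  }
}
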
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
index da1c9bd..6c672b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
@@ -21,9 +21,8 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 
-/** A class to implement an array of BlockLocations
- *  It provide efficient customized serialization/deserialization methods
- *  in stead of using the default array (de)serialization provided by RPC
+/**
+ * Maintains an array of blocks and their corresponding storage IDs.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -36,12 +35,12 @@
   @InterfaceStability.Evolving
   public static class BlockWithLocations {
     Block block;
-    String datanodeIDs[];
+    String storageIDs[];
     
     /** constructor */
-    public BlockWithLocations(Block b, String[] datanodes) {
-      block = b;
-      datanodeIDs = datanodes;
+    public BlockWithLocations(Block block, String[] storageIDs) {
+      this.block = block;
+      this.storageIDs = storageIDs;
     }
     
     /** get the block */
@@ -50,15 +49,15 @@
     }
     
     /** get the block's locations */
-    public String[] getDatanodes() {
-      return datanodeIDs;
+    public String[] getStorageIDs() {
+      return storageIDs;
     }
   }
 
   private BlockWithLocations[] blocks;
 
   /** Constructor with one parameter */
-  public BlocksWithLocations( BlockWithLocations[] blocks ) {
+  public BlocksWithLocations(BlockWithLocations[] blocks) {
     this.blocks = blocks;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
index dda0a6f..b736d12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -47,21 +47,6 @@
     this.softwareVersion = softwareVersion;
   }
 
-  public DatanodeRegistration(String ipAddr, int xferPort) {
-    this(ipAddr, xferPort, new StorageInfo(), new ExportedBlockKeys());
-  }
-
-  public DatanodeRegistration(String ipAddr, int xferPort, StorageInfo info,
-      ExportedBlockKeys keys) {
-    super(ipAddr, xferPort);
-    this.storageInfo = info;
-    this.exportedKeys = keys;
-  }
-  
-  public void setStorageInfo(StorageInfo storage) {
-    this.storageInfo = new StorageInfo(storage);
-  }
-
   public StorageInfo getStorageInfo() {
     return storageInfo;
   }
@@ -74,10 +59,6 @@
     return exportedKeys;
   }
   
-  public void setSoftwareVersion(String softwareVersion) {
-    this.softwareVersion = softwareVersion;
-  }
-  
   public String getSoftwareVersion() {
     return softwareVersion;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
index d1a40c6..96b502b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
@@ -87,12 +87,18 @@
 
   /**
    * @return The most recent transaction ID that has been synced to
-   * persistent storage.
+   * persistent storage, or applied from persistent storage in the
+   * case of a non-active node.
    * @throws IOException
    */
   public long getTransactionID() throws IOException;
 
   /**
+   * Get the transaction ID of the most recent checkpoint.
+   */
+  public long getMostRecentCheckpointTxId() throws IOException;
+
+  /**
    * Closes the current edit log and opens a new one. The 
    * call fails if the file system is in SafeMode.
    * @throws IOException
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
new file mode 100644
index 0000000..b1163d6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceTarget;
+import org.apache.hadoop.ha.ZKFailoverController;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.HDFSPolicyProvider;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.util.StringUtils;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+@InterfaceAudience.Private
+public class DFSZKFailoverController extends ZKFailoverController {
+
+  private static final Log LOG =
+    LogFactory.getLog(DFSZKFailoverController.class);
+  private AccessControlList adminAcl;
+  /* the same as superclass's localTarget, but with the more specific NN type */
+  private final NNHAServiceTarget localNNTarget;
+
+  @Override
+  protected HAServiceTarget dataToTarget(byte[] data) {
+    ActiveNodeInfo proto;
+    try {
+      proto = ActiveNodeInfo.parseFrom(data);
+    } catch (InvalidProtocolBufferException e) {
+      throw new RuntimeException("Invalid data in ZK: " +
+          StringUtils.byteToHexString(data), e);
+    }
+    NNHAServiceTarget ret = new NNHAServiceTarget(
+        conf, proto.getNameserviceId(), proto.getNamenodeId());
+    InetSocketAddress addressFromProtobuf = new InetSocketAddress(
+        proto.getHostname(), proto.getPort());
+    
+    if (!addressFromProtobuf.equals(ret.getAddress())) {
+      throw new RuntimeException("Mismatched address stored in ZK for " +
+          ret + ": Stored protobuf was " + proto + ", address from our own " +
+          "configuration for this NameNode was " + ret.getAddress());
+    }
+    
+    ret.setZkfcPort(proto.getZkfcPort());
+    return ret;
+  }
+
+  @Override
+  protected byte[] targetToData(HAServiceTarget target) {
+    InetSocketAddress addr = target.getAddress();
+
+    return ActiveNodeInfo.newBuilder()
+      .setHostname(addr.getHostName())
+      .setPort(addr.getPort())
+      .setZkfcPort(target.getZKFCAddress().getPort())
+      .setNameserviceId(localNNTarget.getNameServiceId())
+      .setNamenodeId(localNNTarget.getNameNodeId())
+      .build()
+      .toByteArray();
+  }
+  
+  @Override
+  protected InetSocketAddress getRpcAddressToBindTo() {
+    int zkfcPort = getZkfcPort(conf);
+    return new InetSocketAddress(localTarget.getAddress().getAddress(),
+          zkfcPort);
+  }
+
+  @Override
+  protected PolicyProvider getPolicyProvider() {
+    return new HDFSPolicyProvider();
+  }
+  
+  static int getZkfcPort(Configuration conf) {
+    return conf.getInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY,
+        DFSConfigKeys.DFS_HA_ZKFC_PORT_DEFAULT);
+  }
+  
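+  /**
+   * Create a failover controller for the local NameNode: resolve the
+   * nameservice and namenode IDs from the configuration and require that
+   * HA is enabled for that nameservice.
+   */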
+  public static DFSZKFailoverController create(Configuration conf) {
+    Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
+    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
+
+    if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
+      throw new HadoopIllegalArgumentException(
+          "HA is not enabled for this namenode.");
+    }
+    String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
+    NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
+    DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
+    
+    NNHAServiceTarget localTarget = new NNHAServiceTarget(
+        localNNConf, nsId, nnId);
+    return new DFSZKFailoverController(localNNConf, localTarget);
+  }
+
+  private DFSZKFailoverController(Configuration conf,
+      NNHAServiceTarget localTarget) {
+    super(conf, localTarget);
+    this.localNNTarget = localTarget;
+    // Setup ACLs
+    adminAcl = new AccessControlList(
+        conf.get(DFSConfigKeys.DFS_ADMIN, " "));
+    LOG.info("Failover controller configured for NameNode " +
+        localTarget);
+  }
+
+  @Override
+  protected void initRPC() throws IOException {
+    super.initRPC();
+    localNNTarget.setZkfcPort(rpcServer.getAddress().getPort());
+  }
+
+  @Override
+  public void loginAsFCUser() throws IOException {
+    InetSocketAddress socAddr = NameNode.getAddress(conf);
+    SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
+        DFS_NAMENODE_USER_NAME_KEY, socAddr.getHostName());
+  }
+  
+  @Override
+  protected String getScopeInsideParentNode() {
+    return localNNTarget.getNameServiceId();
+  }
+
+  public static void main(String args[])
+      throws Exception {
+    
+    GenericOptionsParser parser = new GenericOptionsParser(
+        new HdfsConfiguration(), args);
+    DFSZKFailoverController zkfc = DFSZKFailoverController.create(
+        parser.getConfiguration());
+    
+    System.exit(zkfc.run(parser.getRemainingArgs()));
+  }
+
+  @Override
+  protected void checkRpcAdminAccess() throws IOException, AccessControlException {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    UserGroupInformation zkfcUgi = UserGroupInformation.getLoginUser();
+    if (adminAcl.isUserAllowed(ugi) ||
+        ugi.getShortUserName().equals(zkfcUgi.getShortUserName())) {
+      LOG.info("Allowed RPC access from " + ugi + " at " + Server.getRemoteAddress());
+      return;
+    }
+    String msg = "Disallowed RPC access from " + ugi + " at " +
+        Server.getRemoteAddress() + ". Not listed in " + DFSConfigKeys.DFS_ADMIN; 
+    LOG.warn(msg);
+    throw new AccessControlException(msg);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
index 51612be..c0e415a84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
@@ -21,14 +21,16 @@
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.GetGroupsBase;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.util.ToolRunner;
@@ -39,6 +41,8 @@
  */
 @InterfaceAudience.Private
 public class GetGroups extends GetGroupsBase {
+  
+  private static final Log LOG = LogFactory.getLog(GetGroups.class);
 
   static{
     HdfsConfiguration.init();
@@ -60,6 +64,22 @@
   }
   
   @Override
+  public void setConf(Configuration conf) {
+    conf = new HdfsConfiguration(conf);
+    String nameNodePrincipal = conf.get(
+        DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "");
+    
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Using NN principal: " + nameNodePrincipal);
+    }
+
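+    // Tell the RPC layer which Kerberos principal to expect from the
+    // NameNode when security is enabled (assumed purpose of this key).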
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+        nameNodePrincipal);
+    
+    super.setConf(conf);
+  }
+  
+  @Override
   protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
     return NameNodeProxies.createProxy(getConf(), FileSystem.getDefaultUri(getConf()),
         GetUserMappingsProtocol.class).getProxy();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
index 1ef58e1..38f5123 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
@@ -21,6 +21,7 @@
 import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.BadFencingConfigurationException;
 import org.apache.hadoop.ha.HAServiceTarget;
 import org.apache.hadoop.ha.NodeFencer;
@@ -44,12 +45,14 @@
   private static final String NAMENODE_ID_KEY = "namenodeid";
   
   private final InetSocketAddress addr;
+  private InetSocketAddress zkfcAddr;
   private NodeFencer fencer;
   private BadFencingConfigurationException fenceConfigError;
   private final String nnId;
   private final String nsId;
-
-  public NNHAServiceTarget(HdfsConfiguration conf,
+  private final boolean autoFailoverEnabled;
+  
+  public NNHAServiceTarget(Configuration conf,
       String nsId, String nnId) {
     Preconditions.checkNotNull(nnId);
     
@@ -75,12 +78,24 @@
     }
     this.addr = NetUtils.createSocketAddr(serviceAddr,
         NameNode.DEFAULT_PORT);
+
+    this.autoFailoverEnabled = targetConf.getBoolean(
+        DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
+        DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
+    if (autoFailoverEnabled) {
+      int port = DFSZKFailoverController.getZkfcPort(targetConf);
+      if (port != 0) {
+        setZkfcPort(port);
+      }
+    }
+    
     try {
       this.fencer = NodeFencer.create(targetConf,
           DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
     } catch (BadFencingConfigurationException e) {
       this.fenceConfigError = e;
     }
+    
     this.nnId = nnId;
     this.nsId = nsId;
   }
@@ -94,10 +109,29 @@
   }
 
   @Override
+  public InetSocketAddress getZKFCAddress() {
+    Preconditions.checkState(autoFailoverEnabled,
+        "ZKFC address not relevant when auto failover is off");
+    assert zkfcAddr != null;
+    
+    return zkfcAddr;
+  }
+  
+  void setZkfcPort(int port) {
+    assert autoFailoverEnabled;
+          
+    this.zkfcAddr = new InetSocketAddress(addr.getAddress(), port);
+  }
+
+  @Override
   public void checkFencingConfigured() throws BadFencingConfigurationException {
     if (fenceConfigError != null) {
       throw fenceConfigError;
     }
+    if (fencer == null) {
+      throw new BadFencingConfigurationException(
+          "No fencer configured for " + this);
+    }
   }
   
   @Override
@@ -125,4 +159,9 @@
     ret.put(NAMESERVICE_ID_KEY, getNameServiceId());
     ret.put(NAMENODE_ID_KEY, getNameNodeId());
   }
+
+  @Override
+  public boolean isAutoFailoverEnabled() {
+    return autoFailoverEnabled;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
index 1b3a15b..a314352 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -48,7 +49,8 @@
         OfflineEditsLoader loader = null;
         try {
           file = new File(inputFileName);
-          elis = new EditLogFileInputStream(file, -1, -1, false);
+          elis = new EditLogFileInputStream(file, HdfsConstants.INVALID_TXID,
+              HdfsConstants.INVALID_TXID, false);
           loader = new OfflineEditsBinaryLoader(visitor, elis);
         } finally {
           if ((loader == null) && (elis != null)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
index fdc9892..2aade9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
@@ -31,11 +31,13 @@
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
+import org.apache.hadoop.hdfs.util.XMLUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
+import org.xml.sax.helpers.AttributesImpl;
 
 /**
  * ImageLoaderCurrent processes Hadoop FSImage files and walks over
@@ -143,6 +145,7 @@
   @Override
   public void loadImage(DataInputStream in, ImageVisitor v,
       boolean skipBlocks) throws IOException {
+    boolean done = false;
     try {
       v.start();
       v.visitEnclosingElement(ImageElement.FS_IMAGE);
@@ -187,11 +190,13 @@
       }
       
       v.leaveEnclosingElement(); // FSImage
-      v.finish();
-    } catch(IOException e) {
-      // Tell the visitor to clean up, then re-throw the exception
-      v.finishAbnormally();
-      throw e;
+      done = true;
+    } finally {
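+      // Give the visitor a chance to clean up on any failure (not just
+      // IOException); only call finish() when the walk completed.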
+      if (done) {
+        v.finish();
+      } else {
+        v.finishAbnormally();
+      }
     }
   }
 
@@ -220,9 +225,29 @@
     for(int i=0; i<numDTokens; i++){
       DelegationTokenIdentifier id = new  DelegationTokenIdentifier();
       id.readFields(in);
-      v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER, id.toString());
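+      // Emit each field of the identifier separately instead of the
+      // flattened toString() form used previously.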
+      long expiryTime = in.readLong();
+      v.visitEnclosingElement(ImageElement.DELEGATION_TOKEN_IDENTIFIER);
+      v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_KIND,
+          id.getKind().toString());
+      v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_SEQNO,
+          id.getSequenceNumber());
+      v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_OWNER,
+          id.getOwner().toString());
+      v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_RENEWER,
+          id.getRenewer().toString());
+      v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_REALUSER,
+          id.getRealUser().toString());
+      v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE,
+          id.getIssueDate());
+      v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MAX_DATE,
+          id.getMaxDate());
+      v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME,
+          expiryTime);
+      v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID,
+          id.getMasterKeyId());
+      v.leaveEnclosingElement(); // DELEGATION_TOKEN_IDENTIFIER
     }
-    v.leaveEnclosingElement();
+    v.leaveEnclosingElement(); // DELEGATION_TOKENS
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
index 9f617ba..e1b2fda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
@@ -71,7 +71,15 @@
     NUM_DELEGATION_TOKENS,
     DELEGATION_TOKENS,
     DELEGATION_TOKEN_IDENTIFIER,
-    DELEGATION_TOKEN_EXPIRY_TIME,
+    DELEGATION_TOKEN_IDENTIFIER_KIND,
+    DELEGATION_TOKEN_IDENTIFIER_SEQNO,
+    DELEGATION_TOKEN_IDENTIFIER_OWNER,
+    DELEGATION_TOKEN_IDENTIFIER_RENEWER,
+    DELEGATION_TOKEN_IDENTIFIER_REALUSER,
+    DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE,
+    DELEGATION_TOKEN_IDENTIFIER_MAX_DATE,
+    DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME,
+    DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID,
     TRANSACTION_ID
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java
index 9ae7c23..a610861 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
 import java.io.IOException;
+import java.util.Date;
 
 /**
  * IndentedImageVisitor walks over an FSImage and displays its structure 
@@ -58,6 +59,16 @@
     write(element + " = " + value + "\n");
   }
 
+  void visit(ImageElement element, long value) throws IOException {
+    if ((element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME) || 
+        (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE) || 
+        (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_MAX_DATE)) {
+      visit(element, new Date(value).toString());
+    } else {
+      visit(element, Long.toString(value));
+    }
+  }
+  
   @Override
   void visitEnclosingElement(ImageElement element) throws IOException {
     printIndents();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
index 1d3208d..f3c5cbb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
@@ -30,8 +30,12 @@
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.PositionTrackingInputStream;
 
 /**
  * OfflineImageViewer to dump the contents of an Hadoop image file to XML
@@ -40,6 +44,8 @@
  */
 @InterfaceAudience.Private
 public class OfflineImageViewer {
+  public static final Log LOG = LogFactory.getLog(OfflineImageViewer.class);
+  
   private final static String usage = 
     "Usage: bin/hdfs oiv [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n" +
     "Offline Image Viewer\n" + 
@@ -112,24 +118,28 @@
    */
   public void go() throws IOException  {
     DataInputStream in = null;
-
+    PositionTrackingInputStream tracker = null;
+    ImageLoader fsip = null;
+    boolean done = false;
     try {
-      in = new DataInputStream(new BufferedInputStream(
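+      // Track the stream position so that, if loading fails, the error
+      // message can report the byte offset within the image file.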
+      tracker = new PositionTrackingInputStream(new BufferedInputStream(
                new FileInputStream(new File(inputFile))));
+      in = new DataInputStream(tracker);
 
       int imageVersionFile = findImageVersion(in);
 
-      ImageLoader fsip =
-             ImageLoader.LoaderFactory.getLoader(imageVersionFile);
+      fsip = ImageLoader.LoaderFactory.getLoader(imageVersionFile);
 
       if(fsip == null) 
         throw new IOException("No image processor to read version " +
             imageVersionFile + " is available.");
-
       fsip.loadImage(in, processor, skipBlocks);
-
+      done = true;
     } finally {
-      if(in != null) in.close();
+      if (!done) {
+        LOG.error("image loading failed at offset " +
+            (tracker == null ? "(unknown)" : String.valueOf(tracker.getPos())));
+      }
+      IOUtils.cleanup(LOG, in, tracker);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Canceler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Canceler.java
new file mode 100644
index 0000000..f47c79d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Canceler.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Provides a simple interface where one thread can mark an operation
+ * for cancellation, and another thread can poll for whether the
+ * cancellation has occurred.
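+ *
+ * <p>Illustrative sketch (doWork() is a placeholder, not part of this API):
+ * <pre>
+ *   Canceler canceler = new Canceler();
+ *   // worker thread
+ *   while (!canceler.isCancelled()) {
+ *     doWork();
+ *   }
+ *   // controlling thread
+ *   canceler.cancel("standby node is becoming active");
+ * </pre>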
+ */
+@InterfaceAudience.Private
+public class Canceler {
+  /**
+   * If the operation has been canceled, set to the reason why
+   * it has been canceled (e.g. the standby moving to active).
+   */
+  private volatile String cancelReason = null;
+  
+  /**
+   * Requests that the current operation be canceled if it is still running.
+   * This does not block until the cancellation is successful.
+   * @param reason the reason why cancellation is requested
+   */
+  public void cancel(String reason) {
+    this.cancelReason = reason;
+  }
+
+  public boolean isCancelled() {
+    return cancelReason != null;
+  }
+  
+  public String getCancellationReason() {
+    return cancelReason;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
index 13ca8b4..90471bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
@@ -173,31 +173,42 @@
    * @return true if element present, false otherwise.
    */
   @SuppressWarnings("unchecked")
+  @Override
   public boolean contains(final Object key) {
+    return getElement((T)key) != null;
+  }
+  
+  /**
+   * Return the element in this set which is equal to
+   * the given key, if such an element exists.
+   * Otherwise returns null.
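+   * This allows callers to retrieve the stored (canonical) instance
+   * rather than only testing membership.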
+   */
+  public T getElement(final T key) {
     // validate key
     if (key == null) {
       throw new IllegalArgumentException("Null element is not supported.");
     }
     // find element
-    final int hashCode = ((T)key).hashCode();
+    final int hashCode = key.hashCode();
     final int index = getIndex(hashCode);
-    return containsElem(index, (T) key, hashCode);
+    return getContainedElem(index, key, hashCode);
   }
 
   /**
-   * Check if the set contains given element at given index.
+   * Check if the set contains given element at given index. If it
+   * does, return that element.
    *
-   * @return true if element present, false otherwise.
+   * @return the element, or null, if no element matches
    */
-  protected boolean containsElem(int index, final T key, int hashCode) {
+  protected T getContainedElem(int index, final T key, int hashCode) {
     for (LinkedElement<T> e = entries[index]; e != null; e = e.next) {
       // element found
       if (hashCode == e.hashCode && e.element.equals(key)) {
-        return true;
+        return e.element;
       }
     }
     // element not found
-    return false;
+    return null;
   }
 
   /**
@@ -240,7 +251,7 @@
     final int hashCode = element.hashCode();
     final int index = getIndex(hashCode);
     // return false if already present
-    if (containsElem(index, element, hashCode)) {
+    if (getContainedElem(index, element, hashCode) != null) {
       return false;
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java
index c90d2c7..a0a2a02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java
@@ -88,7 +88,7 @@
     final int hashCode = element.hashCode();
     final int index = getIndex(hashCode);
     // return false if already present
-    if (containsElem(index, element, hashCode)) {
+    if (getContainedElem(index, element, hashCode) != null) {
       return false;
     }
 
diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
similarity index 71%
rename from hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
index 6177c79..364c5bd 100644
--- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
@@ -15,13 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+option java_package = "org.apache.hadoop.hdfs.server.namenode.ha.proto";
+option java_outer_classname = "HAZKInfoProtos";
 
-package org.apache.hadoop.util;
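+// Information about the currently active NameNode, as written to ZooKeeper
+// by its failover controller (see DFSZKFailoverController.targetToData()).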
+message ActiveNodeInfo {
+  required string nameserviceId = 1;
+  required string namenodeId = 2;
 
-public interface RemoteExecution {
-  public void executeCommand (String remoteHostName, String user,
-          String  command) throws Exception;
-  public int getExitCode();
-  public String getOutput();
-  public String getCommandString();
+  required string hostname = 3;
+  required int32 port = 4;
+  required int32 zkfcPort = 5;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
index e1ddcf0..c54771b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
@@ -85,6 +85,16 @@
 }
 
 /**
+ * void request
+ */
+message GetMostRecentCheckpointTxIdRequestProto {
+}
+
+message GetMostRecentCheckpointTxIdResponseProto {
+  required uint64 txId = 1;
+}
+
+/**
  * registration - Namenode reporting the error
  * errorCode - error code indicating the error
  * msg - Free text description of the error
@@ -189,12 +199,18 @@
       returns(GetTransactionIdResponseProto);
 
   /**
+   * Get the transaction ID of the most recent checkpoint
+   */
+  rpc getMostRecentCheckpointTxId(GetMostRecentCheckpointTxIdRequestProto) 
+      returns(GetMostRecentCheckpointTxIdResponseProto);
+
+  /**
    * Close the current editlog and open a new one for checkpointing purposes
    */
   rpc rollEditLog(RollEditLogRequestProto) returns(RollEditLogResponseProto);
 
   /**
-   * Close the current editlog and open a new one for checkpointing purposes
+   * Request info about the version running on this NameNode
    */
   rpc versionRequest(VersionRequestProto) returns(VersionResponseProto);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
index 316c05c..d64f780 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
@@ -113,6 +113,7 @@
   required sfixed64 seqno = 2;
   required bool lastPacketInBlock = 3;
   required sfixed32 dataLen = 4;
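+  // If true, the receiving datanode should sync this packet's data to
+  // durable storage before acknowledging (assumed here to support hsync).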
+  optional bool syncBlock = 5 [default = false];
 }
 
 enum Status {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 09b72b6..4c4bdb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -274,7 +274,7 @@
  */
 message BlockWithLocationsProto {
   required BlockProto block = 1;   // Block
-  repeated string datanodeIDs = 2; // Datanodes with replicas of the block
+  repeated string storageIDs = 2;  // Datanodes with replicas of the block
 }
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 0000000..10b874b6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1,2 @@
+org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 54ce2a2..9e1a435 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -239,14 +239,6 @@
   left empty in a non-HA cluster.
   </description>
 </property>
-  
-<property>
-  <name>dfs.web.ugi</name>
-  <value>webuser,webgroup</value>
-  <description>The user account used by the web interface.
-    Syntax: USERNAME,GROUP1,GROUP2, ...
-  </description>
-</property>
 
 <property>
   <name>dfs.permissions.enabled</name>
@@ -778,7 +770,7 @@
 </property>
 
 <property>
-  <name>dfs.federation.nameservices</name>
+  <name>dfs.nameservices</name>
   <value></value>
   <description>
     Comma-separated list of nameservices.
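+    For example: "ns1,ns2" for a deployment with two nameservices.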
@@ -786,12 +778,12 @@
 </property>
 
 <property>
-  <name>dfs.federation.nameservice.id</name>
+  <name>dfs.nameservice.id</name>
   <value></value>
   <description>
     The ID of this nameservice. If the nameservice ID is not
     configured or more than one nameservice is configured for
-    dfs.federation.nameservices it is determined automatically by
+    dfs.nameservices, it is determined automatically by
     matching the local node's address with the configured address.
   </description>
 </property>
@@ -837,6 +829,16 @@
 </property>
 
 <property>
+  <name>dfs.ha.automatic-failover.enabled</name>
+  <value>false</value>
+  <description>
+    Whether automatic failover is enabled. See the HDFS High
+    Availability documentation for details on automatic HA
+    configuration.
+  </description>
+</property>
+
+<property>
   <name>dfs.support.append</name>
   <value>true</value>
   <description>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
index 81e595d..275fd78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
@@ -39,10 +39,10 @@
 
 <!DOCTYPE html>
 <html>
-
+<head>
 <link rel="stylesheet" type="text/css" href="/static/hadoop.css">
 <title>Hadoop <%=namenodeRole%>&nbsp;<%=namenodeLabel%></title>
-    
+</head>
 <body>
 <h1><%=namenodeRole%> '<%=namenodeLabel%>' (<%=namenodeState%>)</h1>
 <%= NamenodeJspHelper.getVersionTable(fsn) %>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
new file mode 100644
index 0000000..11c76eb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
@@ -0,0 +1,157 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+body {
+  background-color : #ffffff;
+  font-family : sans-serif;
+}
+
+.small {
+  font-size : smaller;
+}
+
+div#dfsnodetable tr#row1, div.dfstable td.col1 {
+	font-weight : bolder;
+}
+
+div.dfstable th {
+    text-align:left;
+	vertical-align : top;
+}
+
+div.dfstable td#col3 {
+	text-align : right;
+}
+
+div#dfsnodetable caption {
+	text-align : left;
+}
+
+div#dfsnodetable a#title {
+	font-size : larger;
+	font-weight : bolder;
+}
+
+div#dfsnodetable td, th {
+	border-bottom-style : none;
+        padding-bottom : 4px;
+        padding-top : 4px;       
+}
+
+div#dfsnodetable A:link, A:visited {
+	text-decoration : none;       
+}
+
+div#dfsnodetable th.header, th.headerASC, th.headerDSC {
+        padding-bottom : 8px;
+        padding-top : 8px;       
+}
+div#dfsnodetable th.header:hover, th.headerASC:hover, th.headerDSC:hover,
+                 td.name:hover {
+        text-decoration : underline;
+	cursor : pointer;
+}
+
+div#dfsnodetable td.blocks, td.size, td.pcused, td.adminstate, td.lastcontact {
+	text-align : right;
+}
+
+div#dfsnodetable .rowNormal .header {
+	background-color : #ffffff;
+}
+div#dfsnodetable .rowAlt, .headerASC, .headerDSC {
+	background-color : lightyellow;
+}
+
+.warning {
+        font-weight : bolder;
+        color : red;	
+}
+
+div.dfstable table {
+	white-space : pre;
+}
+
+table.storage, table.nodes {
+    border-collapse: collapse;
+}
+
+table.storage td {
+	padding:10px;
+	border:1px solid black;
+}
+
+table.nodes td {
+	padding:0px;
+	border:1px solid black;
+}
+
+div#dfsnodetable td, div#dfsnodetable th, div.dfstable td {
+	padding-left : 10px;
+	padding-right : 10px;
+}
+
+td.perc_filled {
+  background-color:#AAAAFF;
+}
+
+td.perc_nonfilled {
+  background-color:#FFFFFF;
+}
+
+line.taskgraphline {
+  stroke-width:1;stroke-linecap:round;
+}
+
+#quicklinks {
+	margin: 0;
+	padding: 2px 4px;
+	position: fixed;
+	top: 0;
+	right: 0;
+	text-align: right;
+	background-color: #eee;
+	font-weight: bold;
+}
+
+#quicklinks ul {
+	margin: 0;
+	padding: 0;
+	list-style-type: none;
+	font-weight: normal;
+}
+
+#quicklinks ul {
+	display: none;
+}
+
+#quicklinks a {
+	font-size: smaller;
+	text-decoration: none;
+}
+
+#quicklinks ul a {
+	text-decoration: underline;
+}
+
+span.failed {
+    color:red;
+}
+
+div.security {
+    width:100%;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
index 07cb456..a08c7b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
@@ -139,7 +139,7 @@
   /**
    *  create a buffer that contains the entire test file data.
    */
-  static byte[] initBuffer(int size) {
+  public static byte[] initBuffer(int size) {
     if (seed == -1)
       seed = nextLong();
     return randomBytes(seed, size);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 5b50cef..1ed534c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -67,19 +67,23 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.VersionInfo;
 
 import com.google.common.base.Joiner;
 
@@ -319,7 +323,7 @@
    */
   public static void waitCorruptReplicas(FileSystem fs, FSNamesystem ns,
       Path file, ExtendedBlock b, int corruptRepls)
-      throws IOException, TimeoutException {
+      throws IOException, TimeoutException, InterruptedException {
     int count = 0;
     final int ATTEMPTS = 50;
     int repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
@@ -333,6 +337,7 @@
       System.out.println("Waiting for "+corruptRepls+" corrupt replicas");
       repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
       count++;
+      Thread.sleep(1000);
     }
     if (count == ATTEMPTS) {
       throw new TimeoutException("Timed out waiting for corrupt replicas."
@@ -703,18 +708,19 @@
           info.nameserviceId), DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
               info.nameNode.getNameNodeAddress()).toString());
     }
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, Joiner.on(",")
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, Joiner.on(",")
         .join(nameservices));
   }
   
   private static DatanodeID getDatanodeID(String ipAddr) {
-    return new DatanodeID(ipAddr, "localhost",
-        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+    return new DatanodeID(ipAddr, "localhost", "",
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
   }
 
   public static DatanodeID getLocalDatanodeID() {
-    return new DatanodeID("127.0.0.1", "localhost",
-        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+    return getDatanodeID("127.0.0.1");
   }
 
   public static DatanodeID getLocalDatanodeID(int port) {
@@ -740,12 +746,14 @@
 
   public static DatanodeInfo getDatanodeInfo(String ipAddr, 
       String host, int port) {
-    return new DatanodeInfo(new DatanodeID(ipAddr, host, port));
+    return new DatanodeInfo(new DatanodeID(ipAddr, host, "",
+        port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT));
   }
 
   public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
       String hostname, AdminStates adminState) {
-    return new DatanodeInfo(ipAddr, hostname, "storage",
+    return new DatanodeInfo(ipAddr, hostname, "",
         DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
         DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
         DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
@@ -760,6 +768,14 @@
 
   public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
       int port, String rackLocation) {
-    return new DatanodeDescriptor(new DatanodeID(ipAddr, port), rackLocation);
+    DatanodeID dnId = new DatanodeID(ipAddr, "host", "", port,
+        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
+    return new DatanodeDescriptor(dnId, rackLocation);
+  }
+  
+  public static DatanodeRegistration getLocalDatanodeRegistration() {
+    return new DatanodeRegistration(getLocalDatanodeID(),
+        new StorageInfo(), new ExportedBlockKeys(), VersionInfo.getVersion());
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 2b7346e..c116ed1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -25,8 +25,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
@@ -67,8 +67,10 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HAServiceProtocolHelper;
 import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -131,6 +133,7 @@
     private int numDataNodes = 1;
     private boolean format = true;
     private boolean manageNameDfsDirs = true;
+    private boolean manageNameDfsSharedDirs = true;
     private boolean manageDataDfsDirs = true;
     private StartupOption option = null;
     private String[] racks = null; 
@@ -188,6 +191,14 @@
     /**
      * Default: true
      */
+    public Builder manageNameDfsSharedDirs(boolean val) {
+      this.manageNameDfsSharedDirs = val;
+      return this;
+    }
+
+    /**
+     * Default: true
+     */
     public Builder manageDataDfsDirs(boolean val) {
       this.manageDataDfsDirs = val;
       return this;
@@ -286,6 +297,7 @@
                        builder.numDataNodes,
                        builder.format,
                        builder.manageNameDfsDirs,
+                       builder.manageNameDfsSharedDirs,
                        builder.manageDataDfsDirs,
                        builder.option,
                        builder.racks,
@@ -525,7 +537,7 @@
                         long[] simulatedCapacities) throws IOException {
     this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
     initMiniDFSCluster(conf, numDataNodes, format,
-        manageNameDfsDirs, manageDataDfsDirs, operation, racks, hosts,
+        manageNameDfsDirs, true, manageDataDfsDirs, operation, racks, hosts,
         simulatedCapacities, null, true, false,
         MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0));
   }
@@ -533,7 +545,8 @@
   private void initMiniDFSCluster(
       Configuration conf,
       int numDataNodes, boolean format, boolean manageNameDfsDirs,
-      boolean manageDataDfsDirs, StartupOption operation, String[] racks,
+      boolean manageNameDfsSharedDirs, boolean manageDataDfsDirs,
+      StartupOption operation, String[] racks,
       String[] hosts, long[] simulatedCapacities, String clusterId,
       boolean waitSafeMode, boolean setupHostsFile,
       MiniDFSNNTopology nnTopology)
@@ -572,7 +585,8 @@
     
     federation = nnTopology.isFederated();
     createNameNodesAndSetConf(
-        nnTopology, manageNameDfsDirs, format, operation, clusterId, conf);
+        nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
+        format, operation, clusterId, conf);
     
     if (format) {
       if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
@@ -593,8 +607,8 @@
   }
   
   private void createNameNodesAndSetConf(MiniDFSNNTopology nnTopology,
-      boolean manageNameDfsDirs, boolean format, StartupOption operation,
-      String clusterId,
+      boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
+      boolean format, StartupOption operation, String clusterId,
       Configuration conf) throws IOException {
     Preconditions.checkArgument(nnTopology.countNameNodes() > 0,
         "empty NN topology: no namenodes specified!");
@@ -612,7 +626,7 @@
       }
     }
     if (!allNsIds.isEmpty()) {
-      conf.set(DFS_FEDERATION_NAMESERVICES, Joiner.on(",").join(allNsIds));
+      conf.set(DFS_NAMESERVICES, Joiner.on(",").join(allNsIds));
     }
     
     int nnCounter = 0;
@@ -639,7 +653,7 @@
       if (nnIds.size() > 1) {
         conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, nameservice.getId()),
             Joiner.on(",").join(nnIds));
-        if (manageNameDfsDirs) {
+        if (manageNameDfsSharedDirs) {
           URI sharedEditsUri = getSharedEditsDir(nnCounter, nnCounter+nnIds.size()-1); 
           conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY, sharedEditsUri.toString());
         }
@@ -719,7 +733,7 @@
       boolean manageNameDfsDirs, int nnIndex)
       throws IOException {
     if (nameserviceId != null) {
-      conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
+      conf.set(DFS_NAMESERVICE_ID, nameserviceId);
     }
     if (nnId != null) {
       conf.set(DFS_HA_NAMENODE_ID_KEY, nnId);
@@ -1260,6 +1274,13 @@
   public int getNameNodePort(int nnIndex) {
     return nameNodes[nnIndex].nameNode.getNameNodeAddress().getPort();
   }
+
+  /**
+   * @return the service rpc port used by the NameNode at the given index.
+   */     
+  public int getNameNodeServicePort(int nnIndex) {
+    return nameNodes[nnIndex].nameNode.getServiceRpcAddress().getPort();
+  }
     
   /**
    * Shutdown all the nodes in the cluster.
@@ -1660,19 +1681,16 @@
     return FSNamesystem.getNamespaceEditsDirs(nameNodes[nnIndex].conf);
   }
   
-  private HAServiceProtocol getHaServiceClient(int nnIndex) throws IOException {
-    InetSocketAddress addr = nameNodes[nnIndex].nameNode.getServiceRpcAddress();
-    return new HAServiceProtocolClientSideTranslatorPB(addr, conf);
-  }
-  
   public void transitionToActive(int nnIndex) throws IOException,
       ServiceFailedException {
-    HAServiceProtocolHelper.transitionToActive(getHaServiceClient(nnIndex));
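+    // Transition via the NN's own RPC server using a forced user request
+    // rather than going through an external HAServiceProtocol client.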
+    getNameNode(nnIndex).getRpcServer().transitionToActive(
+        new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED));
   }
   
   public void transitionToStandby(int nnIndex) throws IOException,
       ServiceFailedException {
-    HAServiceProtocolHelper.transitionToStandby(getHaServiceClient(nnIndex));
+    getNameNode(nnIndex).getRpcServer().transitionToStandby(
+        new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED));
   }
   
   
@@ -2118,9 +2136,9 @@
     nameNodes = newlist;
     String nameserviceId = NAMESERVICE_ID_PREFIX + (nnIndex + 1);
     
-    String nameserviceIds = conf.get(DFS_FEDERATION_NAMESERVICES);
+    String nameserviceIds = conf.get(DFS_NAMESERVICES);
     nameserviceIds += "," + nameserviceId;
-    conf.set(DFS_FEDERATION_NAMESERVICES, nameserviceIds);
+    conf.set(DFS_NAMESERVICES, nameserviceIds);
   
     String nnId = null;
     initNameNodeAddress(conf, nameserviceId,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index ad0f74e..2ba4dde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -65,10 +65,13 @@
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.mockito.Mockito;
 import org.mockito.internal.stubbing.answers.ThrowsException;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
+import com.google.common.base.Joiner;
+
 /**
  * These tests make sure that DFSClient retries fetching data from DFS
  * properly in case of errors.
@@ -298,6 +301,100 @@
       cluster.shutdown();
     }
   }
+  
+  /**
+   * Test that getAdditionalBlock() and close() are idempotent. This allows
+   * a client to safely retry a call and still produce a correct
+   * file. See HDFS-3031.
+   */
+  public void testIdempotentAllocateBlockAndClose() throws Exception {
+    final String src = "/testIdempotentAllocateBlock";
+    Path file = new Path(src);
+
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+
+    try {
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+      NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
+      NamenodeProtocols spyNN = spy(preSpyNN);
+      DFSClient client = new DFSClient(null, spyNN, conf, null);
+
+      // Make the call to addBlock() get called twice, as if it were retried
+      // due to an IPC issue.
+      doAnswer(new Answer<LocatedBlock>() {
+        @Override
+        public LocatedBlock answer(InvocationOnMock invocation) throws Throwable {
+          LocatedBlock ret = (LocatedBlock) invocation.callRealMethod();
+          LocatedBlocks lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
+          int blockCount = lb.getLocatedBlocks().size();
+          assertEquals(lb.getLastLocatedBlock().getBlock(), ret.getBlock());
+          
+          // Retrying should result in a new block at the end of the file.
+          // (abandoning the old one)
+          LocatedBlock ret2 = (LocatedBlock) invocation.callRealMethod();
+          lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
+          int blockCount2 = lb.getLocatedBlocks().size();
+          assertEquals(lb.getLastLocatedBlock().getBlock(), ret2.getBlock());
+
+          // We shouldn't have gained an extra block by the RPC.
+          assertEquals(blockCount, blockCount2);
+          return (LocatedBlock) ret2;
+        }
+      }).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
+          Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
+
+      doAnswer(new Answer<Boolean>() {
+
+        @Override
+        public Boolean answer(InvocationOnMock invocation) throws Throwable {
+          // complete() may return false a few times before it returns
+          // true. We want to wait until it returns true, and then
+          // make it retry one more time after that.
+          LOG.info("Called complete(: " +
+              Joiner.on(",").join(invocation.getArguments()) + ")");
+          if (!(Boolean)invocation.callRealMethod()) {
+            LOG.info("Complete call returned false, not faking a retry RPC");
+            return false;
+          }
+          // We got a successful close. Call it again to check idempotence.
+          try {
+            boolean ret = (Boolean) invocation.callRealMethod();
+            LOG.info("Complete call returned true, faked second RPC. " +
+                "Returned: " + ret);
+            return ret;
+          } catch (Throwable t) {
+            LOG.error("Idempotent retry threw exception", t);
+            throw t;
+          }
+        }
+      }).when(spyNN).complete(Mockito.anyString(), Mockito.anyString(),
+          Mockito.<ExtendedBlock>any());
+      
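+      // With a 4096-byte block size, writing 10000 bytes spans multiple
+      // blocks, so addBlock() is called several times and each call is
+      // retried by the spy above.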
+      OutputStream stm = client.create(file.toString(), true);
+      try {
+        AppendTestUtil.write(stm, 0, 10000);
+        stm.close();
+        stm = null;
+      } finally {
+        IOUtils.cleanup(LOG, stm);
+      }
+      
+      // Make sure the mock was actually properly injected.
+      Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(
+          Mockito.anyString(), Mockito.anyString(),
+          Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
+      Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(
+          Mockito.anyString(), Mockito.anyString(),
+          Mockito.<ExtendedBlock>any());
+      
+      AppendTestUtil.check(fs, file, 10000);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 
   /**
    * Mock Answer implementation of NN.getBlockLocations that will return
@@ -422,17 +519,20 @@
     LOG.info("Test 4 succeeded! Time spent: "  + (timestamp2-timestamp)/1000.0 + " sec.");
   }
 
-  private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, int retries) 
+  private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, int retries)
     throws IOException {
 
     boolean ret = true;
     short replicationFactor = 1;
     long blockSize = 128*1024*1024; // DFS block size
     int bufferSize = 4096;
-    
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, xcievers);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 
-                retries);
+    int originalXcievers = conf.getInt(
+      DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+      DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
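+    // originalXcievers is saved here so the finally block can restore the
+    // original receiver-thread limit after the test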
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+      xcievers);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
+      retries);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, timeWin);
     // Disable keepalive
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 0);
@@ -508,6 +608,8 @@
       e.printStackTrace();
       ret = false;
     } finally {
+      conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+        originalXcievers);
       fs.delete(file1, false);
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index 38a8372..2474242 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -202,7 +202,7 @@
     case CREATE:
       FSDataOutputStream out = fs.create(name, permission, true, 
           conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-          fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
+          fs.getDefaultReplication(name), fs.getDefaultBlockSize(name), null);
       out.close();
       break;
     case MKDIRS:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
index b07bad25..685f19c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
@@ -248,7 +248,7 @@
       baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
       deleteMatchingFiles(baseDirs, "edits.*");
       startNameNodeShouldFail(StartupOption.ROLLBACK,
-          "No non-corrupt logs for txid ");
+          "Gap in transactions");
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("NameNode rollback with no image file", numDirs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index c95f3ba..acd3dda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -100,7 +100,7 @@
 
   private Configuration setupAddress(String key) {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFS_NAMESERVICES, "nn1");
     conf.set(DFSUtil.addKeySuffixes(key, "nn1"), "localhost:9000");
     return conf;
   }
@@ -112,7 +112,7 @@
   @Test
   public void getNameServiceId() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    conf.set(DFS_NAMESERVICE_ID, "nn1");
     assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
   }
   
@@ -157,7 +157,7 @@
   @Test(expected = HadoopIllegalArgumentException.class)
   public void testGetNameServiceIdException() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFS_NAMESERVICES, "nn1,nn2");
     conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
         "localhost:9000");
     conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
@@ -172,7 +172,7 @@
   @Test
   public void testGetNameServiceIds() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFS_NAMESERVICES, "nn1,nn2");
     Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
     Iterator<String> it = nameserviceIds.iterator();
     assertEquals(2, nameserviceIds.size());
@@ -183,11 +183,11 @@
   @Test
   public void testGetOnlyNameServiceIdOrNull() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    conf.set(DFS_NAMESERVICES, "ns1,ns2");
     assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
-    conf.set(DFS_FEDERATION_NAMESERVICES, "");
+    conf.set(DFS_NAMESERVICES, "");
     assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
-    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFS_NAMESERVICES, "ns1");
     assertEquals("ns1", DFSUtil.getOnlyNameServiceIdOrNull(conf));
   }
 
@@ -199,7 +199,7 @@
   @Test
   public void testMultipleNamenodes() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFS_NAMESERVICES, "nn1,nn2");
     // Test - configured list of namenodes are returned
     final String NN1_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
@@ -270,11 +270,11 @@
     final HdfsConfiguration conf = new HdfsConfiguration();
     String nsId = "ns1";
     
-    conf.set(DFS_FEDERATION_NAMESERVICES, nsId);
-    conf.set(DFS_FEDERATION_NAMESERVICE_ID, nsId);
+    conf.set(DFS_NAMESERVICES, nsId);
+    conf.set(DFS_NAMESERVICE_ID, nsId);
 
     // Set the nameservice specific keys with nameserviceId in the config key
-    for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+    for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
       // Note: value is same as the key
       conf.set(DFSUtil.addKeySuffixes(key, nsId), key);
     }
@@ -284,7 +284,7 @@
 
     // Retrieve the keys without nameserviceId and Ensure generic keys are set
     // to the correct value
-    for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+    for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
       assertEquals(key, conf.get(key));
     }
   }
@@ -299,12 +299,12 @@
     String nsId = "ns1";
     String nnId = "nn1";
     
-    conf.set(DFS_FEDERATION_NAMESERVICES, nsId);
-    conf.set(DFS_FEDERATION_NAMESERVICE_ID, nsId);
+    conf.set(DFS_NAMESERVICES, nsId);
+    conf.set(DFS_NAMESERVICE_ID, nsId);
     conf.set(DFS_HA_NAMENODES_KEY_PREFIX + "." + nsId, nnId);
 
     // Set the nameservice specific keys with nameserviceId in the config key
-    for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+    for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
       // Note: value is same as the key
       conf.set(DFSUtil.addKeySuffixes(key, nsId, nnId), key);
     }
@@ -314,7 +314,7 @@
 
     // Retrieve the keys without nameserviceId and Ensure generic keys are set
     // to the correct value
-    for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+    for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
       assertEquals(key, conf.get(key));
     }
   }
@@ -409,14 +409,20 @@
   }
 
   @Test
-  public void testGetServerInfo() {
+  public void testGetInfoServer() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
+    
     String httpsport = DFSUtil.getInfoServer(null, conf, true);
     assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTPS_PORT_DEFAULT, httpsport);
+    
     String httpport = DFSUtil.getInfoServer(null, conf, false);
     assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTP_PORT_DEFAULT, httpport);
+    
+    String httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
+        "localhost", 8020), conf, false);
+    assertEquals("localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT, httpAddress);
   }
   
   @Test
@@ -430,7 +436,7 @@
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
     
     // Two nameservices, each with two NNs.
-    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    conf.set(DFS_NAMESERVICES, "ns1,ns2");
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),
         "ns1-nn1,ns1-nn2");
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns2"),
@@ -491,7 +497,7 @@
     final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
     final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:8021";
    
-    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2"); 
 
     conf.set(DFSUtil.addKeySuffixes(
@@ -533,30 +539,74 @@
   public void testGetNNUris() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
     
-    final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
-    final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
-    final String NS2_NN_HOST  = "ns2-nn.example.com:8020";
-    final String NN_HOST      = "nn.example.com:8020";
+    final String NS1_NN1_ADDR   = "ns1-nn1.example.com:8020";
+    final String NS1_NN2_ADDR   = "ns1-nn2.example.com:8020";
+    final String NS2_NN_ADDR    = "ns2-nn.example.com:8020";
+    final String NN1_ADDR       = "nn.example.com:8020";
+    final String NN1_SRVC_ADDR  = "nn.example.com:8021";
+    final String NN2_ADDR       = "nn2.example.com:8020";
     
-    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    conf.set(DFS_NAMESERVICES, "ns1,ns2");
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
     conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST);
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_ADDR);
     conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST);
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_ADDR);
     
     conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns2"),
-        NS2_NN_HOST);
+        NS2_NN_ADDR);
     
-    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN_HOST);
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN1_ADDR);
     
-    Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY, 
-        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN2_ADDR);
+    
+    Collection<URI> uris = DFSUtil.getNameServiceUris(conf,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,  DFS_NAMENODE_RPC_ADDRESS_KEY);
+    
+    assertEquals(4, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://ns1")));
+    assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
+    assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
+    assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)));
+    
+    // Make sure that non-HDFS URIs in fs.defaultFS don't get included.
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        "viewfs://vfs-name.example.com");
+    
+    uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, 
+        DFS_NAMENODE_RPC_ADDRESS_KEY);
     
     assertEquals(3, uris.size());
     assertTrue(uris.contains(new URI("hdfs://ns1")));
-    assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_HOST)));
-    assertTrue(uris.contains(new URI("hdfs://" + NN_HOST)));
+    assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
+    assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
+    
+    // Make sure that an HA URI being the default URI doesn't result in multiple
+    // entries being returned.
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
+    
+    uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, 
+        DFS_NAMENODE_RPC_ADDRESS_KEY);
+    
+    assertEquals(3, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://ns1")));
+    assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
+    assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
+    
+    // Make sure that when the service RPC address is distinct from the client
+    // RPC address, and the client RPC address is also used as the default URI,
+    // the client URI does not end up in the set of URIs returned.
+    conf = new HdfsConfiguration();
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN1_ADDR);
+    conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR);
+    
+    uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, 
+        DFS_NAMENODE_RPC_ADDRESS_KEY);
+    
+    assertEquals(1, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
   }
   
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index 3ef892b..aed15d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -159,7 +159,8 @@
       block.getNumBytes(), // OffsetInBlock
       100,                 // sequencenumber
       true,                // lastPacketInBlock
-      0);                  // chunk length
+      0,                   // chunk length
+      false);               // sync block
     hdr.write(sendOut);
     sendOut.writeInt(0);           // zero checksum
 
@@ -402,7 +403,8 @@
       0,     // offset in block,
       100,   // seqno
       false, // last packet
-      -1 - random.nextInt(oneMil)); // bad datalen
+      -1 - random.nextInt(oneMil), // bad datalen
+      false);                     // sync block
     hdr.write(sendOut);
 
     sendResponse(Status.SUCCESS, "", null, recvOut);
@@ -424,7 +426,8 @@
       0,     // OffsetInBlock
       100,   // sequencenumber
       true,  // lastPacketInBlock
-      0);    // chunk length
+      0,     // chunk length
+      false);    // sync block
     hdr.write(sendOut);
     sendOut.writeInt(0);           // zero checksum
     sendOut.flush();
@@ -508,8 +511,8 @@
       1024,                // OffsetInBlock
       100,                 // sequencenumber
       false,               // lastPacketInBlock
-      4096);               // chunk length
-
+      4096,                // chunk length
+      false);              // sync block
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     hdr.write(new DataOutputStream(baos));
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
index fbe98dc..25a7917 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
@@ -39,8 +39,7 @@
 import org.apache.log4j.Level;
 
 /**
- * This class tests that a file need not be closed before its
- * data can be read by another client.
+ * This class tests that pipelines survive data node death and recovery.
  */
 public class TestDatanodeDeath extends TestCase {
   {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
index 6bb7b45..71c898b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
@@ -38,8 +38,7 @@
 import org.junit.Test;
 
 /**
- * This class tests that a file need not be closed before its
- * data can be read by another client.
+ * This class tests data node registration.
  */
 public class TestDatanodeRegistration {
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
index 40c0a1a..038edd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
@@ -21,10 +21,6 @@
 import java.io.IOException;
 import java.io.RandomAccessFile;
 
-import junit.extensions.TestSetup;
-import junit.framework.Test;
-import junit.framework.TestSuite;
-
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -43,9 +39,13 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.log4j.Level;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 /** This class implements some of tests posted in HADOOP-2658. */
-public class TestFileAppend3 extends junit.framework.TestCase {
+public class TestFileAppend3 {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
@@ -64,29 +64,28 @@
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem fs;
 
-  public static Test suite() {
-    return new TestSetup(new TestSuite(TestFileAppend3.class)) {
-      protected void setUp() throws java.lang.Exception {
-        AppendTestUtil.LOG.info("setUp()");
-        conf = new HdfsConfiguration();
-        conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
-        buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
-        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
-        fs = (DistributedFileSystem)cluster.getFileSystem();
-      }
-    
-      protected void tearDown() throws Exception {
-        AppendTestUtil.LOG.info("tearDown()");
-        if(fs != null) fs.close();
-        if(cluster != null) cluster.shutdown();
-      }
-    };  
+  @BeforeClass
+  public static void setUp() throws java.lang.Exception {
+    AppendTestUtil.LOG.info("setUp()");
+    conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
+    buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
+    fs = (DistributedFileSystem)cluster.getFileSystem();
+  }
+   
+  @AfterClass
+  public static void tearDown() throws Exception {
+    AppendTestUtil.LOG.info("tearDown()");
+    if(fs != null) fs.close();
+    if(cluster != null) cluster.shutdown();
   }
 
   /**
    * TC1: Append on block boundary.
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC1() throws Exception {
     final Path p = new Path("/TC1/foo");
     System.out.println("p=" + p);
@@ -115,6 +114,7 @@
    * TC2: Append on non-block boundary.
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC2() throws Exception {
     final Path p = new Path("/TC2/foo");
     System.out.println("p=" + p);
@@ -145,6 +145,7 @@
    * TC5: Only one simultaneous append.
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC5() throws Exception {
     final Path p = new Path("/TC5/foo");
     System.out.println("p=" + p);
@@ -175,6 +176,7 @@
    * TC7: Corrupted replicas are present.
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC7() throws Exception {
     final short repl = 2;
     final Path p = new Path("/TC7/foo");
@@ -224,6 +226,7 @@
    * TC11: Racing rename
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC11() throws Exception {
     final Path p = new Path("/TC11/foo");
     System.out.println("p=" + p);
@@ -282,6 +285,7 @@
    * TC12: Append to partial CRC chunk
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC12() throws Exception {
     final Path p = new Path("/TC12/foo");
     System.out.println("p=" + p);
@@ -313,6 +317,7 @@
    * *
    * @throws IOException
    */
+  @Test
   public void testAppendToPartialChunk() throws IOException {
     final Path p = new Path("/partialChunk/foo");
     final int fileLen = 513;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
index e10eab8..6b18965 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
@@ -176,4 +176,32 @@
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test appending to a file when one of the datanodes in the existing pipeline is down.
+   * @throws Exception
+   */
+  @Test
+  public void testAppendWithPipelineRecovery() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).manageDataDfsDirs(true)
+          .manageNameDfsDirs(true).numDataNodes(4)
+          .racks(new String[] { "/rack1", "/rack1", "/rack1", "/rack2" })
+          .build();
+      cluster.waitActive();
+
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path path = new Path("/test1");
+      DFSTestUtil.createFile(fs, path, 1024, (short) 3, 1L);
+
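+      // Stop one of the datanodes so that the subsequent append has to
+      // recover the write pipeline with the remaining nodes.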
+      cluster.stopDataNode(3);
+      DFSTestUtil.appendFile(fs, path, "hello");
+    } finally {
+      if (null != cluster) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
index 3516cf6..428454c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
@@ -31,11 +31,13 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
 
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+
 /**
- * This class tests that a file need not be closed before its
- * data can be read by another client.
+ * This class tests client lease recovery.
  */
-public class TestFileCreationClient extends junit.framework.TestCase {
+public class TestFileCreationClient {
   static final String DIR = "/" + TestFileCreationClient.class.getSimpleName() + "/";
 
   {
@@ -46,6 +48,7 @@
   }
 
   /** Test lease recovery Triggered by DFSClient. */
+  @Test
   public void testClientTriggeredLeaseRecovery() throws Exception {
     final int REPLICATION = 3;
     Configuration conf = new HdfsConfiguration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index 7370f72..b2cd115 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -101,18 +101,18 @@
       BlockWithLocations[] locs;
       locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
       assertEquals(locs.length, 2);
-      assertEquals(locs[0].getDatanodes().length, 2);
-      assertEquals(locs[1].getDatanodes().length, 2);
+      assertEquals(locs[0].getStorageIDs().length, 2);
+      assertEquals(locs[1].getStorageIDs().length, 2);
 
       // get blocks of size BlockSize from dataNodes[0]
       locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
       assertEquals(locs.length, 1);
-      assertEquals(locs[0].getDatanodes().length, 2);
+      assertEquals(locs[0].getStorageIDs().length, 2);
 
       // get blocks of size 1 from dataNodes[0]
       locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
       assertEquals(locs.length, 1);
-      assertEquals(locs[0].getDatanodes().length, 2);
+      assertEquals(locs[0].getStorageIDs().length, 2);
 
       // get blocks of size 0 from dataNodes[0]
       getBlocksWithException(namenode, dataNodes[0], 0);     
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index b1d31da..a747a33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -875,8 +875,8 @@
       // 6kb block
       // 192kb quota
       final int FILE_SIZE = 1024;
-      final int QUOTA_SIZE = 32 * (int) fs.getDefaultBlockSize();
-      assertEquals(6 * 1024, fs.getDefaultBlockSize());
+      final int QUOTA_SIZE = 32 * (int) fs.getDefaultBlockSize(dir);
+      assertEquals(6 * 1024, fs.getDefaultBlockSize(dir));
       assertEquals(192 * 1024, QUOTA_SIZE);
 
       // Create the dir and set the quota. We need to enable the quota before
@@ -903,7 +903,7 @@
       assertEquals("Invalid space consumed", 59 * FILE_SIZE * 3,
           c.getSpaceConsumed());
       assertEquals("Invalid space consumed", QUOTA_SIZE - (59 * FILE_SIZE * 3),
-          3 * (fs.getDefaultBlockSize() - FILE_SIZE));
+          3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
 
       // Now check that trying to create another file violates the quota
       try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
index 9a7504a..5fee500 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
@@ -38,8 +38,7 @@
 import org.junit.Test;
 
 /**
- * This class tests that a file need not be closed before its
- * data can be read by another client.
+ * This class tests that data nodes are correctly replaced on failure.
  */
 public class TestReplaceDatanodeOnFailure {
   static final Log LOG = AppendTestUtil.LOG;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index 217960b..f58c863 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -161,7 +161,7 @@
 
   private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
     assertEquals(locs1.getBlock(), locs2.getBlock());
-    assertTrue(Arrays.equals(locs1.getDatanodes(), locs2.getDatanodes()));
+    assertTrue(Arrays.equals(locs1.getStorageIDs(), locs2.getStorageIDs()));
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
index 0125b02..e9abc21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import static org.junit.Assert.*;
 import java.io.File;
 import java.io.IOException;
+import java.util.Collection;
 
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -32,11 +35,14 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.junit.Test;
 
 public class TestOverReplicatedBlocks {
@@ -116,6 +122,77 @@
       cluster.shutdown();
     }
   }
+
+  static final long SMALL_BLOCK_SIZE =
+    DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+  static final long SMALL_FILE_LENGTH = SMALL_BLOCK_SIZE * 4;
+
+  /**
+   * The test verifies that the replica chosen for deletion is on the node
+   * with the oldest heartbeat, when that heartbeat is older than the
+   * tolerable heartbeat interval.
+   * It creates a file with several blocks and replication 4.
+   * The last DN is configured to send heartbeats rarely.
+   *
+   * The test waits until the tolerable heartbeat interval expires, then
+   * reduces the replication of the file. All replica deletions should be
+   * scheduled for the last node. No replicas will actually be deleted,
+   * since the last DN does not send heartbeats.
+   */
+  @Test
+  public void testChooseReplicaToDelete() throws IOException {
+    MiniDFSCluster cluster = null;
+    FileSystem fs = null;
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      fs = cluster.getFileSystem();
+      final FSNamesystem namesystem = cluster.getNamesystem();
+
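+      // Start a 4th datanode with a very long (300s) heartbeat interval so it
+      // quickly becomes the node with the oldest heartbeat.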
+      conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
+      cluster.startDataNodes(conf, 1, true, null, null, null);
+      DataNode lastDN = cluster.getDataNodes().get(3);
+      DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
+          lastDN, namesystem.getBlockPoolId());
+      String lastDNid = dnReg.getStorageID();
+
+      final Path fileName = new Path("/foo2");
+      DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
+      DFSTestUtil.waitReplication(fs, fileName, (short)4);
+
+      // Wait for tolerable number of heartbeats plus one
+      DatanodeDescriptor nodeInfo = null;
+      long lastHeartbeat = 0;
+      long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
+        (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
+      do {
+        nodeInfo = 
+          namesystem.getBlockManager().getDatanodeManager().getDatanode(dnReg);
+        lastHeartbeat = nodeInfo.getLastUpdate();
+      } while(now() - lastHeartbeat < waitTime);
+      fs.setReplication(fileName, (short)3);
+
+      BlockLocation locs[] = fs.getFileBlockLocations(
+          fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
+
+      // All replicas for deletion should be scheduled on lastDN.
+      // And should not actually be deleted, because lastDN does not heartbeat.
+      namesystem.readLock();
+      Collection<Block> dnBlocks = 
+        namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
+      assertEquals("Replicas on node " + lastDNid + " should have been deleted",
+          SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
+      namesystem.readUnlock();
+      for(BlockLocation location : locs)
+        assertEquals("Block should still have 4 replicas",
+            4, location.getNames().length);
+    } finally {
+      if(fs != null) fs.close();
+      if(cluster != null) cluster.shutdown();
+    }
+  }
+
   /**
    * Test over replicated block should get invalidated when decreasing the
    * replication for a partial block.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
index eda9f61..c7fafdb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
@@ -114,7 +114,7 @@
     UserGroupInformation ugi = JspHelper.getUGI(context, request, conf);
     Token<? extends TokenIdentifier> tokenInUgi = ugi.getTokens().iterator()
         .next();
-    Assert.assertEquals(tokenInUgi.getService().toString(), expected);
+    Assert.assertEquals(expected, tokenInUgi.getService().toString());
   }
   
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 78e0541..b37e4d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -29,6 +29,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -115,7 +116,7 @@
             0, HdfsConstants.LAYOUT_VERSION))
       .when(mock).versionRequest();
     
-    Mockito.doReturn(new DatanodeRegistration("1.2.3.4", 100))
+    Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
       .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
     
     Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
index c0301ac..b10d27e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
@@ -101,7 +101,7 @@
   @Test
   public void testFederationRefresh() throws Exception {
     Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
         "ns1,ns2");
     addNN(conf, "ns1", "mock1:8020");
     addNN(conf, "ns2", "mock1:8020");
@@ -112,7 +112,7 @@
     log.setLength(0);
 
     // Remove the first NS
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
         "ns1");
     bpm.refreshNamenodes(conf);
     assertEquals(
@@ -122,7 +122,7 @@
     
     // Add back an NS -- this creates a new BPOS since the old
     // one for ns2 should have been previously retired
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
         "ns1,ns2");
     bpm.refreshNamenodes(conf);
     assertEquals(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 6c890b8..3ec7101 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -425,7 +425,7 @@
     DataNode spyDN = spy(dn);
     doThrow(new RecoveryInProgressException("Replica recovery is in progress")).
        when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
-    Daemon d = spyDN.recoverBlocks(initRecoveringBlocks());
+    Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
     d.join();
     verify(spyDN, never()).syncBlock(
         any(RecoveringBlock.class), anyListOf(BlockRecord.class));
@@ -445,7 +445,7 @@
     DataNode spyDN = spy(dn);
     doThrow(new IOException()).
        when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
-    Daemon d = spyDN.recoverBlocks(initRecoveringBlocks());
+    Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
     d.join();
     verify(spyDN, never()).syncBlock(
         any(RecoveringBlock.class), anyListOf(BlockRecord.class));
@@ -465,7 +465,7 @@
     doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
         block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
         initReplicaRecovery(any(RecoveringBlock.class));
-    Daemon d = spyDN.recoverBlocks(initRecoveringBlocks());
+    Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
     d.join();
     DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
     verify(dnP).commitBlockSynchronization(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
index 973f05b..630f19a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
@@ -18,16 +18,20 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URL;
 import java.net.URLEncoder;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.jsp.JspWriter;
 
+import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -44,9 +48,10 @@
   
   private static final String FILE_DATA = "foo bar baz biz buz";
   private static final HdfsConfiguration CONF = new HdfsConfiguration();
+  private static String viewFilePage;
   
-  private static void testViewingFile(MiniDFSCluster cluster, String filePath,
-      boolean doTail) throws IOException {
+  private static void testViewingFile(MiniDFSCluster cluster, String filePath)
+      throws IOException {
     FileSystem fs = cluster.getFileSystem();
     
     Path testPath = new Path(filePath);
@@ -58,23 +63,46 @@
     InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
     int dnInfoPort = cluster.getDataNodes().get(0).getInfoPort();
     
-    String jspName = doTail ? "tail.jsp" : "browseDirectory.jsp";
-    String fileParamName = doTail ? "filename" : "dir";
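+    // Build the browseDirectory.jsp URL on the datanode's info port, pointing
+    // back at the namenode's HTTP and IPC addresses.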
+    URL url = new URL("http://localhost:" + dnInfoPort + "/"
+        + "browseDirectory.jsp" + JspHelper.getUrlParam("dir", 
+            URLEncoder.encode(testPath.toString(), "UTF-8"), true)
+        + JspHelper.getUrlParam("namenodeInfoPort", Integer
+            .toString(nnHttpAddress.getPort())) + JspHelper
+            .getUrlParam("nnaddr", "localhost:" + nnIpcAddress.getPort()));
     
-    URL url = new URL("http://localhost:" + dnInfoPort + "/" + jspName +
-        JspHelper.getUrlParam(fileParamName, URLEncoder.encode(testPath.toString(), "UTF-8"), true) +
-        JspHelper.getUrlParam("namenodeInfoPort", Integer.toString(nnHttpAddress.getPort())) + 
-        JspHelper.getUrlParam("nnaddr", "localhost:" + nnIpcAddress.getPort()));
-    
-    String viewFilePage = DFSTestUtil.urlGet(url);
+    viewFilePage = StringEscapeUtils.unescapeHtml(DFSTestUtil.urlGet(url));
     
     assertTrue("page should show preview of file contents, got: " + viewFilePage,
         viewFilePage.contains(FILE_DATA));
     
-    if (!doTail) {
-      assertTrue("page should show link to download file", viewFilePage
-          .contains("/streamFile" + ServletUtil.encodePath(testPath.toString()) +
-              "?nnaddr=localhost:" + nnIpcAddress.getPort()));
+    assertTrue("page should show link to download file", viewFilePage
+        .contains("/streamFile" + ServletUtil.encodePath(filePath)
+            + "?nnaddr=localhost:" + nnIpcAddress.getPort()));
+    
+    // check that the file can be tailed via the "Tail this file" link
+    String regex = "<a.+href=\"(.+?)\">Tail\\s*this\\s*file\\<\\/a\\>";
+    assertFileContents(regex, "Tail this file");
+    
+    // check that 'Go Back to File View' works after tailing the file
+    regex = "<a.+href=\"(.+?)\">Go\\s*Back\\s*to\\s*File\\s*View\\<\\/a\\>";
+    assertFileContents(regex, "Go Back to File View");
+  }
+  
+  private static void assertFileContents(String regex, String text)
+      throws IOException {
+    Pattern compile = Pattern.compile(regex);
+    Matcher matcher = compile.matcher(viewFilePage);
+    URL hyperlink = null;
+    if (matcher.find()) {
+      // found a hyperlink with the expected link text; follow it
+      hyperlink = new URL(matcher.group(1));
+      viewFilePage = StringEscapeUtils.unescapeHtml(DFSTestUtil
+          .urlGet(hyperlink));
+      assertTrue("page should show preview of file contents", viewFilePage
+          .contains(FILE_DATA));
+    } else {
+      fail(text + " hyperlink should be there in the page content : "
+          + viewFilePage);
     }
   }
   
@@ -97,8 +125,8 @@
         "/foo\">bar/foo\">bar"
       };
       for (String p : paths) {
-        testViewingFile(cluster, p, false);
-        testViewingFile(cluster, p, true);
+        testViewingFile(cluster, p);
+        testViewingFile(cluster, p);
       }
     } finally {
       if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
index 080f47c..fe6e8b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
@@ -46,7 +46,7 @@
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES,
           "namesServerId1,namesServerId2");
       cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -79,7 +79,7 @@
       }
 
       Configuration nn1Conf = cluster.getConfiguration(1);
-      nn1Conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "namesServerId2");
+      nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId2");
       dn1.refreshNamenodes(nn1Conf);
       assertEquals(1, dn1.getAllBpOs().length);
 
@@ -155,7 +155,7 @@
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES,
           "namesServerId1,namesServerId2");
       cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -178,7 +178,7 @@
       File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
       
       Configuration nn1Conf = cluster.getConfiguration(0);
-      nn1Conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "namesServerId1");
+      nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
       dn1.refreshNamenodes(nn1Conf);
       Assert.assertEquals(1, dn1.getAllBpOs().length);
       
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java
new file mode 100644
index 0000000..cf2e448
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java
@@ -0,0 +1,192 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.apache.hadoop.test.MetricsAsserts.*;
+
+import java.util.EnumSet;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.AppendTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.RandomDatum;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.SequenceFile.Writer;
+import org.apache.hadoop.io.compress.DefaultCodec;
+import org.junit.Test;
+
+public class TestHSync {
+  
+  private void checkSyncMetric(MiniDFSCluster cluster, int dn, long value) {
+    DataNode datanode = cluster.getDataNodes().get(dn);
+    assertCounter("FsyncCount", value, getMetrics(datanode.getMetrics().name()));    
+  }
+  private void checkSyncMetric(MiniDFSCluster cluster, long value) {
+    checkSyncMetric(cluster, 0, value);
+  }
+  /** Test basic hsync cases */
+  @Test
+  public void testHSync() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    final FileSystem fs = cluster.getFileSystem();
+
+    final Path p = new Path("/testHSync/foo");
+    final int len = 1 << 16;
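+    // create the file with SYNC_BLOCK so that close() also syncs the data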
+    FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
+        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
+        4096, (short) 1, len, null);
+    out.hflush();
+    // hflush does not sync
+    checkSyncMetric(cluster, 0);
+    out.hsync();
+    // hsync on empty file does nothing
+    checkSyncMetric(cluster, 0);
+    out.write(1);
+    checkSyncMetric(cluster, 0);
+    out.hsync();
+    checkSyncMetric(cluster, 1);
+    // avoiding repeated hsyncs is a potential future optimization
+    out.hsync();
+    checkSyncMetric(cluster, 2);
+    out.hflush();
+    // hflush still does not sync
+    checkSyncMetric(cluster, 2);
+    out.close();
+    // close is sync'ing
+    checkSyncMetric(cluster, 3);
+
+    // same with a file created without SYNC_BLOCK
+    out = fs.create(p, FsPermission.getDefault(),
+        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
+        4096, (short) 1, len, null);
+    out.hsync();
+    checkSyncMetric(cluster, 3);
+    out.write(1);
+    checkSyncMetric(cluster, 3);
+    out.hsync();
+    checkSyncMetric(cluster, 4);
+    // repeated hsyncs
+    out.hsync();
+    checkSyncMetric(cluster, 5);
+    out.close();
+    // close does not sync (not opened with SYNC_BLOCK)
+    checkSyncMetric(cluster, 5);
+    cluster.shutdown();
+  }
+
+  /** Test hsync on an exact block boundary */
+  @Test
+  public void testHSyncBlockBoundary() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    final FileSystem fs = cluster.getFileSystem();
+    
+    final Path p = new Path("/testHSyncBlockBoundary/foo");
+    final int len = 1 << 16;
+    final byte[] fileContents = AppendTestUtil.initBuffer(len);
+    FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
+        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
+        4096, (short) 1, len, null);
+    // fill exactly one block (tests the SYNC_BLOCK case) and flush
+    out.write(fileContents, 0, len);
+    out.hflush();
+    // the full block should have caused a sync
+    checkSyncMetric(cluster, 1);
+    out.hsync();
+    // an hsync at the same block boundary, with no new data, does not sync again
+    checkSyncMetric(cluster, 1);
+    // write one more byte and sync again
+    out.write(1);
+    out.hsync();
+    checkSyncMetric(cluster, 2);
+    out.close();
+    checkSyncMetric(cluster, 3);
+    cluster.shutdown();
+  }
+
+  /** Test hsync via SequenceFiles */
+  @Test
+  public void testSequenceFileSync() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+
+    final FileSystem fs = cluster.getFileSystem();
+    final Path p = new Path("/testSequenceFileSync/foo");
+    final int len = 1 << 16;
+    FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
+        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
+        4096, (short) 1, len, null);
+    Writer w = SequenceFile.createWriter(new Configuration(),
+        Writer.stream(out),
+        Writer.keyClass(RandomDatum.class),
+        Writer.valueClass(RandomDatum.class),
+        Writer.compression(CompressionType.NONE, new DefaultCodec()));
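+    // hflush()/hsync() on the SequenceFile writer are forwarded to the
+    // underlying HDFS output stream created above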
+    w.hflush();
+    checkSyncMetric(cluster, 0);
+    w.hsync();
+    checkSyncMetric(cluster, 1);
+    int seed = new Random().nextInt();
+    RandomDatum.Generator generator = new RandomDatum.Generator(seed);
+    generator.next();
+    w.append(generator.getKey(), generator.getValue());
+    w.hsync();
+    checkSyncMetric(cluster, 2);
+    w.close();
+    checkSyncMetric(cluster, 2);
+    out.close();
+    checkSyncMetric(cluster, 3);
+    cluster.shutdown();
+  }
+
+  /** Test that syncBlock is correctly performed at replicas */
+  @Test
+  public void testHSyncWithReplication() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    final FileSystem fs = cluster.getFileSystem();
+
+    final Path p = new Path("/testHSyncWithReplication/foo");
+    final int len = 1 << 16;
+    FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
+        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
+        4096, (short) 3, len, null);
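+    // with replication 3, each hsync should bump the FsyncCount metric on all
+    // three datanodes in the pipeline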
+    out.write(1);
+    out.hflush();
+    checkSyncMetric(cluster, 0, 0);
+    checkSyncMetric(cluster, 1, 0);
+    checkSyncMetric(cluster, 2, 0);
+    out.hsync();
+    checkSyncMetric(cluster, 0, 1);
+    checkSyncMetric(cluster, 1, 1);
+    checkSyncMetric(cluster, 2, 1);
+    out.hsync();
+    checkSyncMetric(cluster, 0, 2);
+    checkSyncMetric(cluster, 1, 2);
+    checkSyncMetric(cluster, 2, 2);
+    cluster.shutdown();
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
index a21cab5..14a0da3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
@@ -105,7 +105,7 @@
         namenodesBuilder.append(",");
       }
 
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, namenodesBuilder
           .toString());
       DataNode dn = cluster.getDataNodes().get(0);
       dn.refreshNamenodes(conf);
@@ -122,7 +122,7 @@
 
       namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
           .getConfiguration(2)));
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, namenodesBuilder
           .toString());
       dn.refreshNamenodes(conf);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index ec5b8a7..7644e05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -35,10 +35,12 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
@@ -765,6 +767,7 @@
     ArrayList<Block> blocks;
     int nrBlocks; // actual number of blocks
     long[] blockReportList;
+    int dnIdx;
 
     /**
      * Return a 6 digit integer port.
@@ -780,11 +783,7 @@
     }
 
     TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
-      String ipAddr = DNS.getDefaultIP("default");
-      String hostName = DNS.getDefaultHost("default", "default");
-      dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx));
-      dnRegistration.setHostName(hostName);
-      dnRegistration.setSoftwareVersion(VersionInfo.getVersion());
+      this.dnIdx = dnIdx;
       this.blocks = new ArrayList<Block>(blockCapacity);
       this.nrBlocks = 0;
     }
@@ -800,7 +799,14 @@
     void register() throws IOException {
       // get versions from the namenode
       nsInfo = nameNodeProto.versionRequest();
-      dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
+      dnRegistration = new DatanodeRegistration(
+          new DatanodeID(DNS.getDefaultIP("default"),
+              DNS.getDefaultHost("default", "default"),
+              "", getNodePort(dnIdx),
+              DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+              DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
+          new DataStorage(nsInfo, ""),
+          new ExportedBlockKeys(), VersionInfo.getVersion());
       DataNode.setNewStorageID(dnRegistration);
       // register datanode
       dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
@@ -896,12 +902,9 @@
         for(int t = 0; t < blockTargets.length; t++) {
           DatanodeInfo dnInfo = blockTargets[t];
           DatanodeRegistration receivedDNReg;
-          receivedDNReg =
-            new DatanodeRegistration(dnInfo.getIpAddr(), dnInfo.getXferPort());
-          receivedDNReg.setStorageInfo(
-            new DataStorage(nsInfo, dnInfo.getStorageID()));
-          receivedDNReg.setInfoPort(dnInfo.getInfoPort());
-          receivedDNReg.setIpcPort(dnInfo.getIpcPort());
+          receivedDNReg = new DatanodeRegistration(dnInfo,
+            new DataStorage(nsInfo, dnInfo.getStorageID()),
+            new ExportedBlockKeys(), VersionInfo.getVersion());
           ReceivedDeletedBlockInfo[] rdBlocks = {
             new ReceivedDeletedBlockInfo(
                   blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 288ae8a..49d0f5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -1115,7 +1115,7 @@
     Configuration conf = new HdfsConfiguration();
     String nameserviceId1 = "ns1";
     String nameserviceId2 = "ns2";
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceId1
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, nameserviceId1
         + "," + nameserviceId2);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index d1136bc..5e77d73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -22,6 +22,7 @@
 import java.net.URI;
 import java.util.Collection;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -527,7 +528,7 @@
     } catch (IOException e) {
       // expected
       assertEquals("Cause of exception should be ChecksumException",
-          e.getCause().getClass(), ChecksumException.class);
+          ChecksumException.class, e.getCause().getClass());
     }
   }
 
@@ -739,8 +740,9 @@
         throw ioe;
       } else {
         GenericTestUtils.assertExceptionContains(
-            "No non-corrupt logs for txid 3",
-            ioe);
+          "Gap in transactions. Expected to be able to read up until " +
+          "at least txid 3 but unable to find any edit logs containing " +
+          "txid 3", ioe);
       }
     } finally {
       cluster.shutdown();
@@ -765,16 +767,16 @@
       tracker = new FSEditLogLoader.PositionTrackingInputStream(in);
       in = new DataInputStream(tracker);
             
-      reader = new FSEditLogOp.Reader(in, version);
+      reader = new FSEditLogOp.Reader(in, tracker, version);
     }
   
     @Override
-    public long getFirstTxId() throws IOException {
+    public long getFirstTxId() {
       return HdfsConstants.INVALID_TXID;
     }
     
     @Override
-    public long getLastTxId() throws IOException {
+    public long getLastTxId() {
       return HdfsConstants.INVALID_TXID;
     }
   
@@ -1103,9 +1105,9 @@
 
     for (EditLogInputStream edits : editStreams) {
       FSEditLogLoader.EditLogValidation val = FSEditLogLoader.validateEditLog(edits);
-      long read = val.getNumTransactions();
+      long read = (val.getEndTxId() - edits.getFirstTxId()) + 1;
       LOG.info("Loading edits " + edits + " read " + read);
-      assertEquals(startTxId, val.getStartTxId());
+      assertEquals(startTxId, edits.getFirstTxId());
       startTxId += read;
       totaltxnread += read;
     }
@@ -1153,7 +1155,9 @@
       fail("Should have thrown exception");
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains(
-          "No non-corrupt logs for txid " + startGapTxId, ioe);
+          "Gap in transactions. Expected to be able to read up until " +
+          "at least txid 40 but unable to find any edit logs containing " +
+          "txid 11", ioe);
     }
   }
 
@@ -1227,4 +1231,55 @@
       validateNoCrash(garbage);
     }
   }
+
+  /**
+   * Test creating, and then loading, a large number of edit log segments
+   */
+  @Test
+  public void testManyEditLogSegments() throws IOException {
+    final int NUM_EDIT_LOG_ROLLS = 1000;
+    // start a cluster
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      FSImage fsimage = namesystem.getFSImage();
+      final FSEditLog editLog = fsimage.getEditLog();
+      for (int i = 0; i < NUM_EDIT_LOG_ROLLS; i++){
+        editLog.logSetReplication("fakefile" + i, (short)(i % 3));
+        assertExistsInStorageDirs(
+            cluster, NameNodeDirType.EDITS,
+            NNStorage.getInProgressEditsFileName((i * 3) + 1));
+        editLog.logSync();
+        editLog.rollEditLog();
+        assertExistsInStorageDirs(
+            cluster, NameNodeDirType.EDITS,
+            NNStorage.getFinalizedEditsFileName((i * 3) + 1, (i * 3) + 3));
+      }
+      editLog.close();
+    } finally {
+      if(fileSys != null) fileSys.close();
+      if(cluster != null) cluster.shutdown();
+    }
+
+    // How long does it take to read through all these edit logs?
+    long startTime = System.currentTimeMillis();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).
+          numDataNodes(NUM_DATA_NODES).build();
+      cluster.waitActive();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+    long endTime = System.currentTimeMillis();
+    double delta = ((float)(endTime - startTime)) / 1000.0;
+    LOG.info(String.format("loaded %d edit log segments in %.2f seconds",
+        NUM_EDIT_LOG_ROLLS, delta));
+  }
 }
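
For reference (not part of the patch): the txid arithmetic in testManyEditLogSegments follows from each rolled segment apparently carrying three transactions: the segment-start marker, the single logSetReplication edit, and the segment-end marker. Iteration i (starting at 0) therefore expects an in-progress file beginning at txid (i*3)+1 and, after the roll, a finalized file covering txids (i*3)+1 through (i*3)+3; with 1000 rolls, the timed restart at the end reads back 1000 finalized segments.
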
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
index b5097a7..d39df40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
@@ -40,8 +40,6 @@
 import org.mockito.Mockito;
 
 public class TestEditLogFileOutputStream {
-  
-  private final static long PREALLOCATION_LENGTH = (1024 * 1024) + 4;
   private final static int HEADER_LEN = 17;
   private static final File TEST_EDITS =
     new File(System.getProperty("test.build.data","/tmp"),
@@ -51,24 +49,25 @@
   public void deleteEditsFile() {
     TEST_EDITS.delete();
   }
-  
+
   @Test
   public void testPreallocation() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
         .build();
 
+    final long START_TXID = 1;
     StorageDirectory sd = cluster.getNameNode().getFSImage()
       .getStorage().getStorageDir(0);
-    File editLog = NNStorage.getInProgressEditsFile(sd, 1);
+    File editLog = NNStorage.getInProgressEditsFile(sd, START_TXID);
 
     EditLogValidation validation = EditLogFileInputStream.validateEditLog(editLog);
     assertEquals("Edit log should contain a header as valid length",
         HEADER_LEN, validation.getValidLength());
-    assertEquals(1, validation.getNumTransactions());
+    assertEquals(validation.getEndTxId(), START_TXID);
     assertEquals("Edit log should have 1MB pre-allocated, plus 4 bytes " +
         "for the version number",
-        PREALLOCATION_LENGTH, editLog.length());
+        EditLogFileOutputStream.PREALLOCATION_LENGTH + 4, editLog.length());
     
 
     cluster.getFileSystem().mkdirs(new Path("/tmp"),
@@ -79,10 +78,10 @@
     assertTrue("Edit log should have more valid data after writing a txn " +
         "(was: " + oldLength + " now: " + validation.getValidLength() + ")",
         validation.getValidLength() > oldLength);
-    assertEquals(2, validation.getNumTransactions());
+    assertEquals(1, validation.getEndTxId() - START_TXID);
 
     assertEquals("Edit log should be 1MB long, plus 4 bytes for the version number",
-        PREALLOCATION_LENGTH, editLog.length());
+        EditLogFileOutputStream.PREALLOCATION_LENGTH + 4, editLog.length());
     // 256 blocks for the 1MB of preallocation space
     assertTrue("Edit log disk space used should be at least 257 blocks",
         256 * 4096 <= new DU(editLog, conf).getUsed());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 1917dde..a54df2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -22,12 +22,15 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.BufferedInputStream;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
 import java.util.Map;
+import java.util.Set;
 import java.util.SortedMap;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -38,16 +41,23 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
 import com.google.common.io.Files;
 
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
+
 public class TestFSEditLogLoader {
   
   static {
@@ -152,108 +162,6 @@
   }
   
   /**
-   * Test that the valid number of transactions can be counted from a file.
-   * @throws IOException 
-   */
-  @Test
-  public void testCountValidTransactions() throws IOException {
-    File testDir = new File(TEST_DIR, "testCountValidTransactions");
-    File logFile = new File(testDir,
-        NNStorage.getInProgressEditsFileName(1));
-    
-    // Create a log file, and return the offsets at which each
-    // transaction starts.
-    FSEditLog fsel = null;
-    final int NUM_TXNS = 30;
-    SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
-    try {
-      fsel = FSImageTestUtil.createStandaloneEditLog(testDir);
-      fsel.openForWrite();
-      assertTrue("should exist: " + logFile, logFile.exists());
-      
-      for (int i = 0; i < NUM_TXNS; i++) {
-        long trueOffset = getNonTrailerLength(logFile);
-        long thisTxId = fsel.getLastWrittenTxId() + 1;
-        offsetToTxId.put(trueOffset, thisTxId);
-        System.err.println("txid " + thisTxId + " at offset " + trueOffset);
-        fsel.logDelete("path" + i, i);
-        fsel.logSync();
-      }
-    } finally {
-      if (fsel != null) {
-        fsel.close();
-      }
-    }
-
-    // The file got renamed when the log was closed.
-    logFile = testDir.listFiles()[0];
-    long validLength = getNonTrailerLength(logFile);
-
-    // Make sure that uncorrupted log has the expected length and number
-    // of transactions.
-    EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
-    assertEquals(NUM_TXNS + 2, validation.getNumTransactions());
-    assertEquals(validLength, validation.getValidLength());
-    
-    // Back up the uncorrupted log
-    File logFileBak = new File(testDir, logFile.getName() + ".bak");
-    Files.copy(logFile, logFileBak);
-
-    // Corrupt the log file in various ways for each txn
-    for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
-      long txOffset = entry.getKey();
-      long txid = entry.getValue();
-      
-      // Restore backup, truncate the file exactly before the txn
-      Files.copy(logFileBak, logFile);
-      truncateFile(logFile, txOffset);
-      validation = EditLogFileInputStream.validateEditLog(logFile);
-      assertEquals("Failed when truncating to length " + txOffset,
-          txid - 1, validation.getNumTransactions());
-      assertEquals(txOffset, validation.getValidLength());
-
-      // Restore backup, truncate the file with one byte in the txn,
-      // also isn't valid
-      Files.copy(logFileBak, logFile);
-      truncateFile(logFile, txOffset + 1);
-      validation = EditLogFileInputStream.validateEditLog(logFile);
-      assertEquals("Failed when truncating to length " + (txOffset + 1),
-          txid - 1, validation.getNumTransactions());
-      assertEquals(txOffset, validation.getValidLength());
-
-      // Restore backup, corrupt the txn opcode
-      Files.copy(logFileBak, logFile);
-      corruptByteInFile(logFile, txOffset);
-      validation = EditLogFileInputStream.validateEditLog(logFile);
-      assertEquals("Failed when corrupting txn opcode at " + txOffset,
-          txid - 1, validation.getNumTransactions());
-      assertEquals(txOffset, validation.getValidLength());
-
-      // Restore backup, corrupt a byte a few bytes into the txn
-      Files.copy(logFileBak, logFile);
-      corruptByteInFile(logFile, txOffset+5);
-      validation = EditLogFileInputStream.validateEditLog(logFile);
-      assertEquals("Failed when corrupting txn data at " + (txOffset+5),
-          txid - 1, validation.getNumTransactions());
-      assertEquals(txOffset, validation.getValidLength());
-    }
-    
-    // Corrupt the log at every offset to make sure that validation itself
-    // never throws an exception, and that the calculated lengths are monotonically
-    // increasing
-    long prevNumValid = 0;
-    for (long offset = 0; offset < validLength; offset++) {
-      Files.copy(logFileBak, logFile);
-      corruptByteInFile(logFile, offset);
-      EditLogValidation val = EditLogFileInputStream.validateEditLog(logFile);
-      assertTrue(String.format("%d should have been >= %d",
-          val.getNumTransactions(), prevNumValid),
-          val.getNumTransactions() >= prevNumValid);
-      prevNumValid = val.getNumTransactions();
-    }
-  }
-
-  /**
    * Corrupt the byte at the given offset in the given file,
    * by subtracting 1 from it.
    */
@@ -316,4 +224,118 @@
       fis.close();
     }
   }
+
+  @Test
+  public void testStreamLimiter() throws IOException {
+    final File LIMITER_TEST_FILE = new File(TEST_DIR, "limiter.test");
+    
+    FileOutputStream fos = new FileOutputStream(LIMITER_TEST_FILE);
+    try {
+      fos.write(0x12);
+      fos.write(0x12);
+      fos.write(0x12);
+    } finally {
+      fos.close();
+    }
+    
+    FileInputStream fin = new FileInputStream(LIMITER_TEST_FILE);
+    BufferedInputStream bin = new BufferedInputStream(fin);
+    FSEditLogLoader.PositionTrackingInputStream tracker = 
+        new FSEditLogLoader.PositionTrackingInputStream(bin);
+    try {
+      tracker.setLimit(2);
+      tracker.mark(100);
+      tracker.read();
+      tracker.read();
+      try {
+        tracker.read();
+        fail("expected to get IOException after reading past the limit");
+      } catch (IOException e) {
+      }
+      tracker.reset();
+      tracker.mark(100);
+      byte arr[] = new byte[3];
+      try {
+        tracker.read(arr);
+        fail("expected to get IOException after reading past the limit");
+      } catch (IOException e) {
+      }
+      tracker.reset();
+      arr = new byte[2];
+      tracker.read(arr);
+    } finally {
+      tracker.close();
+    }
+  }
+
+  /**
+   * Create an unfinalized edit log for testing purposes
+   *
+   * @param testDir           Directory to create the edit log in
+   * @param numTx             Number of transactions to add to the new edit log
+   * @param offsetToTxId      A map, filled in by this method, from offsets in
+   *                          the edit log file to transaction IDs.
+   * @return                  The new edit log file name.
+   * @throws IOException
+   */
+  static private File prepareUnfinalizedTestEditLog(File testDir, int numTx,
+      SortedMap<Long, Long> offsetToTxId) throws IOException {
+    File inProgressFile = new File(testDir, NNStorage.getInProgressEditsFileName(1));
+    FSEditLog fsel = null, spyLog = null;
+    try {
+      fsel = FSImageTestUtil.createStandaloneEditLog(testDir);
+      spyLog = spy(fsel);
+      // Normally, the in-progress edit log would be finalized by
+      // FSEditLog#endCurrentLogSegment.  For testing purposes, we
+      // disable that here.
+      doNothing().when(spyLog).endCurrentLogSegment(true);
+      spyLog.openForWrite();
+      assertTrue("should exist: " + inProgressFile, inProgressFile.exists());
+      
+      for (int i = 0; i < numTx; i++) {
+        long trueOffset = getNonTrailerLength(inProgressFile);
+        long thisTxId = spyLog.getLastWrittenTxId() + 1;
+        offsetToTxId.put(trueOffset, thisTxId);
+        System.err.println("txid " + thisTxId + " at offset " + trueOffset);
+        spyLog.logDelete("path" + i, i);
+        spyLog.logSync();
+      }
+    } finally {
+      if (spyLog != null) {
+        spyLog.close();
+      } else if (fsel != null) {
+        fsel.close();
+      }
+    }
+    return inProgressFile;
+  }
+
+  @Test
+  public void testValidateEditLogWithCorruptHeader() throws IOException {
+    File testDir = new File(TEST_DIR, "testValidateEditLogWithCorruptHeader");
+    SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
+    File logFile = prepareUnfinalizedTestEditLog(testDir, 2, offsetToTxId);
+    RandomAccessFile rwf = new RandomAccessFile(logFile, "rw");
+    try {
+      rwf.seek(0);
+      rwf.writeLong(42); // corrupt header
+    } finally {
+      rwf.close();
+    }
+    EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
+    assertTrue(validation.hasCorruptHeader());
+  }
+
+  @Test
+  public void testValidateEmptyEditLog() throws IOException {
+    File testDir = new File(TEST_DIR, "testValidateEmptyEditLog");
+    SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
+    File logFile = prepareUnfinalizedTestEditLog(testDir, 0, offsetToTxId);
+    // Truncate the file so that there is nothing except the header
+    truncateFile(logFile, 4);
+    EditLogValidation validation =
+        EditLogFileInputStream.validateEditLog(logFile);
+    assertTrue(!validation.hasCorruptHeader());
+    assertEquals(HdfsConstants.INVALID_TXID, validation.getEndTxId());
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
index 0ac1944..e972f59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
@@ -20,6 +20,7 @@
 import static org.junit.Assert.*;
 
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Iterator;
@@ -29,10 +30,14 @@
 import java.io.FilenameFilter;
 import java.io.IOException;
 import org.junit.Test;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
@@ -40,10 +45,52 @@
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
 
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.TreeMultiset;
 import com.google.common.base.Joiner;
 
 public class TestFileJournalManager {
+  static final Log LOG = LogFactory.getLog(TestFileJournalManager.class);
 
+  /**
+   * Find out how many transactions we can read from a
+   * FileJournalManager, starting at a given transaction ID.
+   * 
+   * @param jm              The journal manager
+   * @param fromTxId        Transaction ID to start at
+   * @param inProgressOk    Should we consider edit logs that are not finalized?
+   * @return                The number of transactions
+   * @throws IOException
+   */
+  static long getNumberOfTransactions(FileJournalManager jm, long fromTxId,
+      boolean inProgressOk, boolean abortOnGap) throws IOException {
+    long numTransactions = 0, txId = fromTxId;
+    final TreeMultiset<EditLogInputStream> allStreams =
+        TreeMultiset.create(JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
+    jm.selectInputStreams(allStreams, fromTxId, inProgressOk);
+
+    try {
+      for (EditLogInputStream elis : allStreams) {
+        elis.skipUntil(txId);
+        while (true) {
+          FSEditLogOp op = elis.readOp();
+          if (op == null) {
+            break;
+          }
+          if (abortOnGap && (op.getTransactionId() != txId)) {
+            LOG.info("getNumberOfTransactions: detected gap at txId " +
+                txId);
+            return numTransactions;
+          }
+          txId = op.getTransactionId() + 1;
+          numTransactions++;
+        }
+      }
+    } finally {
+      IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
+    }
+    return numTransactions;
+  }
+  
   /** 
    * Test the normal operation of loading transactions from
    * file journal manager. 3 edits directories are setup without any
@@ -61,7 +108,7 @@
     long numJournals = 0;
     for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
       FileJournalManager jm = new FileJournalManager(sd, storage);
-      assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1, true));
+      assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
       numJournals++;
     }
     assertEquals(3, numJournals);
@@ -82,7 +129,7 @@
 
     FileJournalManager jm = new FileJournalManager(sd, storage);
     assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, 
-                 jm.getNumberOfTransactions(1, true));
+                 getNumberOfTransactions(jm, 1, true, false));
   }
 
   /**
@@ -104,16 +151,16 @@
     Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
     StorageDirectory sd = dirs.next();
     FileJournalManager jm = new FileJournalManager(sd, storage);
-    assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1, true));
+    assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
     
     sd = dirs.next();
     jm = new FileJournalManager(sd, storage);
-    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1,
-        true));
+    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
+        true, false));
 
     sd = dirs.next();
     jm = new FileJournalManager(sd, storage);
-    assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1, true));
+    assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
   }
 
   /** 
@@ -137,18 +184,18 @@
     Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
     StorageDirectory sd = dirs.next();
     FileJournalManager jm = new FileJournalManager(sd, storage);
-    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1,
-        true));
+    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
+        true, false));
     
     sd = dirs.next();
     jm = new FileJournalManager(sd, storage);
-    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1,
-        true));
+    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
+        true, false));
 
     sd = dirs.next();
     jm = new FileJournalManager(sd, storage);
-    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1,
-        true));
+    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
+        true, false));
   }
 
   /** 
@@ -198,24 +245,15 @@
 
     FileJournalManager jm = new FileJournalManager(sd, storage);
     long expectedTotalTxnCount = TXNS_PER_ROLL*10 + TXNS_PER_FAIL;
-    assertEquals(expectedTotalTxnCount, jm.getNumberOfTransactions(1, true));
+    assertEquals(expectedTotalTxnCount, getNumberOfTransactions(jm, 1,
+        true, false));
 
     long skippedTxns = (3*TXNS_PER_ROLL); // skip first 3 files
     long startingTxId = skippedTxns + 1; 
 
-    long numTransactionsToLoad = jm.getNumberOfTransactions(startingTxId, true);
-    long numLoaded = 0;
-    while (numLoaded < numTransactionsToLoad) {
-      EditLogInputStream editIn = jm.getInputStream(startingTxId, true);
-      FSEditLogLoader.EditLogValidation val = FSEditLogLoader.validateEditLog(editIn);
-      long count = val.getNumTransactions();
-
-      editIn.close();
-      startingTxId += count;
-      numLoaded += count;
-    }
-
-    assertEquals(expectedTotalTxnCount - skippedTxns, numLoaded); 
+    long numLoadable = getNumberOfTransactions(jm, startingTxId,
+        true, false);
+    assertEquals(expectedTotalTxnCount - skippedTxns, numLoadable); 
   }
 
   /**
@@ -236,8 +274,8 @@
     // 10 rolls, so 11 rolled files, 110 txids total.
     final int TOTAL_TXIDS = 10 * 11;
     for (int txid = 1; txid <= TOTAL_TXIDS; txid++) {
-      assertEquals((TOTAL_TXIDS - txid) + 1, jm.getNumberOfTransactions(txid,
-          true));
+      assertEquals((TOTAL_TXIDS - txid) + 1, getNumberOfTransactions(jm, txid,
+          true, false));
     }
   }
 
@@ -269,19 +307,13 @@
     assertTrue(files[0].delete());
     
     FileJournalManager jm = new FileJournalManager(sd, storage);
-    assertEquals(startGapTxId-1, jm.getNumberOfTransactions(1, true));
+    assertEquals(startGapTxId-1, getNumberOfTransactions(jm, 1, true, true));
 
-    try {
-      jm.getNumberOfTransactions(startGapTxId, true);
-      fail("Should have thrown an exception by now");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains(
-          "Gap in transactions, max txnid is 110, 0 txns from 31", ioe);
-    }
+    assertEquals(0, getNumberOfTransactions(jm, startGapTxId, true, true));
 
     // rolled 10 times so there should be 11 files.
     assertEquals(11*TXNS_PER_ROLL - endGapTxId, 
-                 jm.getNumberOfTransactions(endGapTxId + 1, true));
+                 getNumberOfTransactions(jm, endGapTxId + 1, true, true));
   }
 
   /** 
@@ -308,7 +340,7 @@
 
     FileJournalManager jm = new FileJournalManager(sd, storage);
     assertEquals(10*TXNS_PER_ROLL+1, 
-                 jm.getNumberOfTransactions(1, true));
+                 getNumberOfTransactions(jm, 1, true, false));
   }
 
   @Test
@@ -345,6 +377,33 @@
     FileJournalManager.matchEditLogs(badDir);
   }
   
+  private static EditLogInputStream getJournalInputStream(JournalManager jm,
+      long txId, boolean inProgressOk) throws IOException {
+    final TreeMultiset<EditLogInputStream> allStreams =
+        TreeMultiset.create(JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
+    jm.selectInputStreams(allStreams, txId, inProgressOk);
+    try {
+      for (Iterator<EditLogInputStream> iter = allStreams.iterator();
+          iter.hasNext();) {
+        EditLogInputStream elis = iter.next();
+        if (elis.getFirstTxId() > txId) {
+          break;
+        }
+        if (elis.getLastTxId() < txId) {
+          iter.remove();
+          elis.close();
+          continue;
+        }
+        elis.skipUntil(txId);
+        iter.remove();
+        return elis;
+      }
+    } finally {
+      IOUtils.cleanup(LOG,  allStreams.toArray(new EditLogInputStream[0]));
+    }
+    return null;
+  }
+    
   /**
   * Make sure that we start reading the correct op when we request a stream
    * with a txid in the middle of an edit log file.
@@ -359,7 +418,7 @@
     
     FileJournalManager jm = new FileJournalManager(sd, storage);
     
-    EditLogInputStream elis = jm.getInputStream(5, true);
+    EditLogInputStream elis = getJournalInputStream(jm, 5, true);
     FSEditLogOp op = elis.readOp();
     assertEquals("read unexpected op", op.getTransactionId(), 5);
   }
@@ -381,9 +440,9 @@
     FileJournalManager jm = new FileJournalManager(sd, storage);
     
     // If we exclude the in-progess stream, we should only have 100 tx.
-    assertEquals(100, jm.getNumberOfTransactions(1, false));
+    assertEquals(100, getNumberOfTransactions(jm, 1, false, false));
     
-    EditLogInputStream elis = jm.getInputStream(90, false);
+    EditLogInputStream elis = getJournalInputStream(jm, 90, false);
     FSEditLogOp lastReadOp = null;
     while ((lastReadOp = elis.readOp()) != null) {
       assertTrue(lastReadOp.getTransactionId() <= 100);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index f7626ad..9b59802 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -18,21 +18,27 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.*;
+
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.io.PrintWriter;
 import java.io.RandomAccessFile;
+import java.io.StringWriter;
+import java.io.Writer;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.nio.channels.FileChannel;
 import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Random;
 import java.util.regex.Pattern;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -42,25 +48,30 @@
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.PatternLayout;
 import org.apache.log4j.RollingFileAppender;
+import org.junit.Test;
 
 /**
  * A JUnit test for doing fsck
  */
-public class TestFsck extends TestCase {
+public class TestFsck {
   static final String auditLogFile = System.getProperty("test.build.dir",
       "build/test") + "/audit.log";
   
@@ -79,13 +90,15 @@
     PrintStream out = new PrintStream(bStream, true);
     ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.ALL);
     int errCode = ToolRunner.run(new DFSck(conf, out), path);
-    if (checkErrorCode)
+    if (checkErrorCode) {
       assertEquals(expectedErrCode, errCode);
+    }
     ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.INFO);
     return bStream.toString();
   }
 
   /** do fsck */
+  @Test
   public void testFsck() throws Exception {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
     MiniDFSCluster cluster = null;
@@ -158,6 +171,7 @@
     assertNull("Unexpected event in audit log", reader.readLine());
   }
   
+  @Test
   public void testFsckNonExistent() throws Exception {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
     MiniDFSCluster cluster = null;
@@ -180,6 +194,7 @@
   }
 
   /** Test fsck with permission set on inodes */
+  @Test
   public void testFsckPermission() throws Exception {
     final DFSTestUtil util = new DFSTestUtil(getClass().getSimpleName(), 20, 3, 8*1024);
     final Configuration conf = new HdfsConfiguration();
@@ -227,6 +242,7 @@
     }
   }
 
+  @Test
   public void testFsckMoveAndDelete() throws Exception {
     final int MAX_MOVE_TRIES = 5;
     DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3, 8*1024);
@@ -300,6 +316,7 @@
     }
   }
   
+  @Test
   public void testFsckOpenFiles() throws Exception {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 4, 3, 8*1024); 
     MiniDFSCluster cluster = null;
@@ -350,6 +367,7 @@
     }
   }
 
+  @Test
   public void testCorruptBlock() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -426,6 +444,7 @@
    * 
    * @throws Exception
    */
+  @Test
   public void testFsckError() throws Exception {
     MiniDFSCluster cluster = null;
     try {
@@ -460,6 +479,7 @@
   }
   
   /** check if option -list-corruptfiles of fsck command works properly */
+  @Test
   public void testFsckListCorruptFilesBlocks() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -529,6 +549,7 @@
    * Test for checking fsck command on illegal arguments should print the proper
    * usage.
    */
+  @Test
   public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
     MiniDFSCluster cluster = null;
     try {
@@ -560,4 +581,73 @@
       }
     }
   }
+  
+  /**
+   * Tests that the numbers of missing and expected block replicas are reported correctly
+   * @throws IOException
+   */
+  @Test
+  public void testFsckMissingReplicas() throws IOException {
+    // Desired replication factor
+    // Set this higher than NUM_REPLICAS so it's under-replicated
+    final short REPL_FACTOR = 2;
+    // Number of replicas to actually start
+    final short NUM_REPLICAS = 1;
+    // Number of blocks to write
+    final short NUM_BLOCKS = 3;
+    // Set a small-ish blocksize
+    final long blockSize = 512;
+    
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    
+    MiniDFSCluster cluster = null;
+    DistributedFileSystem dfs = null;
+    
+    try {
+      // Startup a minicluster
+      cluster = 
+          new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
+      assertNotNull("Failed Cluster Creation", cluster);
+      cluster.waitClusterUp();
+      dfs = (DistributedFileSystem) cluster.getFileSystem();
+      assertNotNull("Failed to get FileSystem", dfs);
+      
+      // Create a file that will be intentionally under-replicated
+      final String pathString = new String("/testfile");
+      final Path path = new Path(pathString);
+      long fileLen = blockSize * NUM_BLOCKS;
+      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
+      
+      // Set up an fsck run against the under-replicated file
+      NameNode namenode = cluster.getNameNode();
+      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
+          .getDatanodeManager().getNetworkTopology();
+      Map<String,String[]> pmap = new HashMap<String, String[]>();
+      Writer result = new StringWriter();
+      PrintWriter out = new PrintWriter(result, true);
+      InetAddress remoteAddress = InetAddress.getLocalHost();
+      NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
+          NUM_REPLICAS, (short)1, remoteAddress);
+      
+      // Run the fsck and check the Result
+      final HdfsFileStatus file = 
+          namenode.getRpcServer().getFileInfo(pathString);
+      assertNotNull(file);
+      Result res = new Result(conf);
+      fsck.check(pathString, file, res);
+      // Also print the output from the fsck, for ex post facto sanity checks
+      System.out.println(result.toString());
+      assertEquals(res.missingReplicas, 
+          (NUM_BLOCKS*REPL_FACTOR) - (NUM_BLOCKS*NUM_REPLICAS));
+      assertEquals(res.numExpectedReplicas, NUM_BLOCKS*REPL_FACTOR);
+    } finally {
+      if(dfs != null) {
+        dfs.close();
+      }
+      if(cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
index 51e49a9..f21f65e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import static org.mockito.Mockito.mock;
@@ -26,9 +24,9 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.io.Writable;
 
 import java.net.URI;
+import java.util.Collection;
 import java.io.IOException;
 
 public class TestGenericJournalConf {
@@ -144,15 +142,8 @@
     }
 
     @Override
-    public EditLogInputStream getInputStream(long fromTxnId, boolean inProgressOk)
-        throws IOException {
-      return null;
-    }
-
-    @Override
-    public long getNumberOfTransactions(long fromTxnId, boolean inProgressOk)
-        throws IOException {
-      return 0;
+    public void selectInputStreams(Collection<EditLogInputStream> streams,
+        long fromTxnId, boolean inProgressOk) {
     }
 
     @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
index c574cb3..62fac19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
@@ -21,20 +21,29 @@
 
 import java.io.IOException;
 
+import javax.servlet.ServletContext;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.junit.Test;
+import org.mockito.ArgumentMatcher;
+import org.mockito.Mockito;
 
 public class TestGetImageServlet {
   
   @Test
-  public void testIsValidRequestorWithHa() throws IOException {
+  public void testIsValidRequestor() throws IOException {
     Configuration conf = new HdfsConfiguration();
+    KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
     
     // Set up generic HA configs.
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
         "ns1"), "nn1,nn2");
     
@@ -53,8 +62,33 @@
     // Initialize this conf object as though we're running on NN1.
     NameNode.initializeGenericKeys(conf, "ns1", "nn1");
     
+    AccessControlList acls = Mockito.mock(AccessControlList.class);
+    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
+    ServletContext context = Mockito.mock(ServletContext.class);
+    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    
     // Make sure that NN2 is considered a valid fsimage/edits requestor.
-    assertTrue(GetImageServlet.isValidRequestor("hdfs/host2@TEST-REALM.COM",
-        conf));
+    assertTrue(GetImageServlet.isValidRequestor(context,
+        "hdfs/host2@TEST-REALM.COM", conf));
+    
+    // Mark atm as an admin.
+    Mockito.when(acls.isUserAllowed(Mockito.argThat(new ArgumentMatcher<UserGroupInformation>() {
+      @Override
+      public boolean matches(Object argument) {
+        return ((UserGroupInformation) argument).getShortUserName().equals("atm");
+      }
+    }))).thenReturn(true);
+    
+    // Make sure that NN2 is still considered a valid requestor.
+    assertTrue(GetImageServlet.isValidRequestor(context,
+        "hdfs/host2@TEST-REALM.COM", conf));
+    
+    // Make sure an admin is considered a valid requestor.
+    assertTrue(GetImageServlet.isValidRequestor(context,
+        "atm@TEST-REALM.COM", conf));
+    
+    // Make sure other users are *not* considered valid requestors.
+    assertFalse(GetImageServlet.isValidRequestor(context,
+        "todd@TEST-REALM.COM", conf));
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index 6968096..608ee26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -25,6 +25,8 @@
 import java.util.Set;
 
 import static org.junit.Assert.*;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -37,7 +39,6 @@
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -213,15 +214,129 @@
   public void testSkipEdit() throws IOException {
     runEditLogTest(new EltsTestGarbageInEditLog());
   }
-  
-  /** Test that we can successfully recover from a situation where the last
-   * entry in the edit log has been truncated. */
-  @Test(timeout=180000)
-  public void testRecoverTruncatedEditLog() throws IOException {
+
+  /**
+   * An algorithm for corrupting an edit log.
+   */
+  static interface Corruptor {
+    /*
+     * Corrupt an edit log file.
+     *
+     * @param editFile   The edit log file
+     */
+    public void corrupt(File editFile) throws IOException;
+
+    /*
+     * Explain whether we need to read the log in recovery mode
+     *
+     * @param finalized  True if the edit log in question is finalized.
+     *                   We're a little more lax about reading unfinalized
+     *                   logs.  We will allow a small amount of garbage at
+     *                   the end.  In a finalized log, every byte must be
+     *                   perfect.
+     *
+     * @return           Whether we need to read the log in recovery mode
+     */
+    public boolean needRecovery(boolean finalized);
+
+    /*
+     * Get the name of this corruptor
+     *
+     * @return           The Corruptor name
+     */
+    public String getName();
+  }
+
+  static class TruncatingCorruptor implements Corruptor {
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Corrupt the last edit
+      long fileLen = editFile.length();
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.setLength(fileLen - 1);
+      rwf.close();
+    }
+
+    @Override
+    public boolean needRecovery(boolean finalized) {
+      return finalized;
+    }
+
+    @Override
+    public String getName() {
+      return "truncated";
+    }
+  }
+
+  static class PaddingCorruptor implements Corruptor {
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Add junk to the end of the file
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.seek(editFile.length());
+      for (int i = 0; i < 129; i++) {
+        rwf.write((byte)0);
+      }
+      rwf.write(0xd);
+      rwf.write(0xe);
+      rwf.write(0xa);
+      rwf.write(0xd);
+      rwf.close();
+    }
+
+    @Override
+    public boolean needRecovery(boolean finalized) {
+      // With finalized edit logs, we ignore what's at the end as long as we
+      // can make it to the correct transaction ID.
+      // With unfinalized edit logs, the finalization process ignores garbage
+      // at the end.
+      return false;
+    }
+
+    @Override
+    public String getName() {
+      return "padFatal";
+    }
+  }
+
+  static class SafePaddingCorruptor implements Corruptor {
+    private byte padByte;
+
+    public SafePaddingCorruptor(byte padByte) {
+      this.padByte = padByte;
+      assert ((this.padByte == 0) || (this.padByte == -1));
+    }
+
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Add junk to the end of the file
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.seek(editFile.length());
+      rwf.write((byte)-1);
+      for (int i = 0; i < 1024; i++) {
+        rwf.write(padByte);
+      }
+      rwf.close();
+    }
+
+    @Override
+    public boolean needRecovery(boolean finalized) {
+      return false;
+    }
+
+    @Override
+    public String getName() {
+      return "pad" + ((int)padByte);
+    }
+  }
+
+  static void testNameNodeRecoveryImpl(Corruptor corruptor, boolean finalize)
+      throws IOException {
     final String TEST_PATH = "/test/path/dir";
-    final int NUM_TEST_MKDIRS = 10;
-    
-    // start a cluster 
+    final String TEST_PATH2 = "/second/dir";
+    final boolean needRecovery = corruptor.needRecovery(finalize);
+
+    // start a cluster
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     FileSystem fileSys = null;
@@ -230,12 +345,20 @@
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
           .build();
       cluster.waitActive();
+      if (!finalize) {
+        // Normally, the in-progress edit log would be finalized by
+        // FSEditLog#endCurrentLogSegment.  For testing purposes, we
+        // disable that here.
+        FSEditLog spyLog =
+            spy(cluster.getNameNode().getFSImage().getEditLog());
+        doNothing().when(spyLog).endCurrentLogSegment(true);
+        cluster.getNameNode().getFSImage().setEditLogForTesting(spyLog);
+      }
       fileSys = cluster.getFileSystem();
       final FSNamesystem namesystem = cluster.getNamesystem();
       FSImage fsimage = namesystem.getFSImage();
-      for (int i = 0; i < NUM_TEST_MKDIRS; i++) {
-        fileSys.mkdirs(new Path(TEST_PATH));
-      }
+      fileSys.mkdirs(new Path(TEST_PATH));
+      fileSys.mkdirs(new Path(TEST_PATH2));
       sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
     } finally {
       if (cluster != null) {
@@ -246,13 +369,12 @@
     File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
     assertTrue("Should exist: " + editFile, editFile.exists());
 
-    // Corrupt the last edit
-    long fileLen = editFile.length();
-    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
-    rwf.setLength(fileLen - 1);
-    rwf.close();
-    
-    // Make sure that we can't start the cluster normally before recovery
+    // Corrupt the edit log
+    LOG.info("corrupting edit log file '" + editFile + "'");
+    corruptor.corrupt(editFile);
+
+    // If needRecovery == true, make sure that we can't start the
+    // cluster normally before recovery
     cluster = null;
     try {
       LOG.debug("trying to start normally (this should fail)...");
@@ -260,16 +382,24 @@
           .format(false).build();
       cluster.waitActive();
       cluster.shutdown();
-      fail("expected the truncated edit log to prevent normal startup");
+      if (needRecovery) {
+        fail("expected the corrupted edit log to prevent normal startup");
+      }
     } catch (IOException e) {
-      // success
+      if (!needRecovery) {
+        LOG.error("Got unexpected failure with " + corruptor.getName() +
+            corruptor, e);
+        fail("got unexpected exception " + e.getMessage());
+      }
     } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
     }
-    
-    // Perform recovery
+
+    // Perform NameNode recovery.
+    // Even if there was nothing wrong previously (needRecovery == false),
+    // this should still work fine.
     cluster = null;
     try {
       LOG.debug("running recovery...");
@@ -277,22 +407,23 @@
           .format(false).startupOption(recoverStartOpt).build();
     } catch (IOException e) {
       fail("caught IOException while trying to recover. " +
-          "message was " + e.getMessage() + 
+          "message was " + e.getMessage() +
           "\nstack trace\n" + StringUtils.stringifyException(e));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
     }
-    
+
     // Make sure that we can start the cluster normally after recovery
     cluster = null;
     try {
       LOG.debug("starting cluster normally after recovery...");
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
           .format(false).build();
-      LOG.debug("testRecoverTruncatedEditLog: successfully recovered the " +
-          "truncated edit log");
+      LOG.debug("successfully recovered the " + corruptor.getName() +
+          " corrupted edit log");
+      cluster.waitActive();
       assertTrue(cluster.getFileSystem().exists(new Path(TEST_PATH)));
     } catch (IOException e) {
       fail("failed to recover.  Error message: " + e.getMessage());
@@ -302,4 +433,36 @@
       }
     }
   }
+
+  /** Test that we can successfully recover from a situation where the last
+   * entry in the edit log has been truncated. */
+  @Test(timeout=180000)
+  public void testRecoverTruncatedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new TruncatingCorruptor(), true);
+    testNameNodeRecoveryImpl(new TruncatingCorruptor(), false);
+  }
+
+  /** Test that we can successfully recover from a situation where the last
+   * entry in the edit log has been padded with garbage. */
+  @Test(timeout=180000)
+  public void testRecoverPaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new PaddingCorruptor(), true);
+    testNameNodeRecoveryImpl(new PaddingCorruptor(), false);
+  }
+
+  /** Test that we don't need to recover from a situation where the last
+   * entry in the edit log has been padded with 0. */
+  @Test(timeout=180000)
+  public void testRecoverZeroPaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)0), true);
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)0), false);
+  }
+
+  /** Test that we don't need to recover from a situation where the last
+   * entry in the edit log has been padded with 0xff bytes. */
+  @Test(timeout=180000)
+  public void testRecoverNegativeOnePaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)-1), true);
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)-1), false);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index ed2162b..5808ea4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -51,6 +51,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -517,14 +518,15 @@
     
     try {
       doAnEdit(fsn, 1);
-
+      final Canceler canceler = new Canceler();
+      
       // Save namespace
       fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
       try {
         Future<Void> saverFuture = pool.submit(new Callable<Void>() {
           @Override
           public Void call() throws Exception {
-            image.saveNamespace(finalFsn);
+            image.saveNamespace(finalFsn, canceler);
             return null;
           }
         });
@@ -534,7 +536,7 @@
         // then cancel the saveNamespace
         Future<Void> cancelFuture = pool.submit(new Callable<Void>() {
           public Void call() throws Exception {
-            image.cancelSaveNamespace("cancelled");
+            canceler.cancel("cancelled");
             return null;
           }
         });
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
index 72c2876..37e2967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
@@ -92,7 +92,7 @@
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
         "127.0.0.1:0");
     
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
     
     // Set a nameservice-specific configuration for name dir
     File dir = new File(MiniDFSCluster.getBaseDirectory(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index e7101c6..ce93851 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -185,7 +185,7 @@
     conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
         logicalName, nameNodeId2), address2);
     
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, logicalName);
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, logicalName),
         nameNodeId1 + "," + nameNodeId2);
     conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
index b26e85a..4f93f4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
@@ -23,6 +23,7 @@
 import java.net.URI;
 import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -35,6 +36,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
@@ -43,6 +45,7 @@
 
 import com.google.common.base.Suppliers;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
 import static org.junit.Assert.*;
@@ -177,7 +180,7 @@
       logs.stopCapturing();
     }
     GenericTestUtils.assertMatches(logs.getOutput(),
-        "FATAL.*Unable to read transaction ids 1-4 from the configured shared");
+        "FATAL.*Unable to read transaction ids 1-3 from the configured shared");
   }
   
   @Test
@@ -195,30 +198,29 @@
     assertEquals(0, rc);
   }
   
+  /**
+   * Test that, even if the other node is not active, we are able
+   * to bootstrap standby from it.
+   */
   @Test(timeout=30000)
   public void testOtherNodeNotActive() throws Exception {
     cluster.transitionToStandby(0);
     int rc = BootstrapStandby.run(
-        new String[]{"-nonInteractive"},
-        cluster.getConfiguration(1));
-    assertEquals(BootstrapStandby.ERR_CODE_OTHER_NN_NOT_ACTIVE, rc);
-    
-    // Answer "yes" to the prompt about transition to active
-    System.setIn(new ByteArrayInputStream("yes\n".getBytes()));
-    rc = BootstrapStandby.run(
         new String[]{"-force"},
         cluster.getConfiguration(1));
     assertEquals(0, rc);
-    
-    assertFalse(nn0.getNamesystem().isInStandbyState());
   }
-
+  
   private void assertNNFilesMatch() throws Exception {
     List<File> curDirs = Lists.newArrayList();
     curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0));
     curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1));
+    
+    // Ignore seen_txid file, since the newly bootstrapped standby
+    // will have a higher seen_txid than the one it bootstrapped from.
+    Set<String> ignoredFiles = ImmutableSet.of("seen_txid");
     FSImageTestUtil.assertParallelFilesAreIdentical(curDirs,
-        Collections.<String>emptySet());
+        ignoredFiles);
   }
 
   private void removeStandbyNameDirs() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
new file mode 100644
index 0000000..52e1369
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import static org.junit.Assert.*;
+
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ha.ClientBaseWithFixes;
+import org.apache.hadoop.ha.HealthMonitor;
+import org.apache.hadoop.ha.ZKFCTestUtil;
+import org.apache.hadoop.ha.ZKFailoverController;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
+import org.apache.hadoop.hdfs.tools.DFSZKFailoverController;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+
+import com.google.common.base.Supplier;
+
+public class TestDFSZKFailoverController extends ClientBaseWithFixes {
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private TestContext ctx;
+  private ZKFCThread thr1, thr2;
+  private FileSystem fs;
+  
+  @Before
+  public void setup() throws Exception {
+    conf = new Configuration();
+    // Specify the quorum per-nameservice, to ensure that these configs
+    // can be nameservice-scoped.
+    conf.set(ZKFailoverController.ZK_QUORUM_KEY + ".ns1", hostPort);
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
+        AlwaysSucceedFencer.class.getName());
+    conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
+
+    // Turn off IPC client caching, so that the suite can handle
+    // the restart of the daemons between test cases.
+    conf.setInt(
+        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
+        0);
+    
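+    // Use fixed, distinct RPC ports for the two ZKFCs so they do not
+    // collide with each other or with the NameNode IPC ports set below.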
+    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10003);
+    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10004);
+
+    MiniDFSNNTopology topology = new MiniDFSNNTopology()
+    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002)));
+    cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(topology)
+        .numDataNodes(0)
+        .build();
+    cluster.waitActive();
+
+    ctx = new TestContext();
+    ctx.addThread(thr1 = new ZKFCThread(ctx, 0));
+    assertEquals(0, thr1.zkfc.run(new String[]{"-formatZK"}));
+
+    thr1.start();
+    waitForHAState(0, HAServiceState.ACTIVE);
+    
+    ctx.addThread(thr2 = new ZKFCThread(ctx, 1));
+    thr2.start();
+    
+    // Wait for the ZKFCs to fully start up
+    ZKFCTestUtil.waitForHealthState(thr1.zkfc,
+        HealthMonitor.State.SERVICE_HEALTHY, ctx);
+    ZKFCTestUtil.waitForHealthState(thr2.zkfc,
+        HealthMonitor.State.SERVICE_HEALTHY, ctx);
+    
+    fs = HATestUtil.configureFailoverFs(cluster, conf);
+  }
+  
+  @After
+  public void shutdown() throws Exception {
+    cluster.shutdown();
+    
+    if (thr1 != null) {
+      thr1.interrupt();
+    }
+    if (thr2 != null) {
+      thr2.interrupt();
+    }
+    if (ctx != null) {
+      ctx.stop();
+    }
+  }
+  
+  /**
+   * Test that automatic failover is triggered by shutting the
+   * active NN down.
+   */
+  @Test(timeout=30000)
+  public void testFailoverAndBackOnNNShutdown() throws Exception {
+    Path p1 = new Path("/dir1");
+    Path p2 = new Path("/dir2");
+    
+    // Write some data on the first NN
+    fs.mkdirs(p1);
+    // Shut it down, causing automatic failover
+    cluster.shutdownNameNode(0);
+    // Data should still exist. Write some on the new NN
+    assertTrue(fs.exists(p1));
+    fs.mkdirs(p2);
+    assertEquals(AlwaysSucceedFencer.getLastFencedService().getAddress(),
+        thr1.zkfc.getLocalTarget().getAddress());
+    
+    // Start the first node back up
+    cluster.restartNameNode(0);
+    // This should have no effect -- the restarted node should come back as STANDBY.
+    waitForHAState(0, HAServiceState.STANDBY);
+    assertTrue(fs.exists(p1));
+    assertTrue(fs.exists(p2));
+    // Shut down the second node, which should failback to the first
+    cluster.shutdownNameNode(1);
+    waitForHAState(0, HAServiceState.ACTIVE);
+
+    // First node should see what was written on the second node while the first was down.
+    assertTrue(fs.exists(p1));
+    assertTrue(fs.exists(p2));
+    assertEquals(AlwaysSucceedFencer.getLastFencedService().getAddress(),
+        thr2.zkfc.getLocalTarget().getAddress());
+  }
+  
+  @Test(timeout=30000)
+  public void testManualFailover() throws Exception {
+    thr2.zkfc.getLocalTarget().getZKFCProxy(conf, 15000).gracefulFailover();
+    waitForHAState(0, HAServiceState.STANDBY);
+    waitForHAState(1, HAServiceState.ACTIVE);
+
+    thr1.zkfc.getLocalTarget().getZKFCProxy(conf, 15000).gracefulFailover();
+    waitForHAState(0, HAServiceState.ACTIVE);
+    waitForHAState(1, HAServiceState.STANDBY);
+  }
+  
+  @Test(timeout=30000)
+  public void testManualFailoverWithDFSHAAdmin() throws Exception {
+    DFSHAAdmin tool = new DFSHAAdmin();
+    tool.setConf(conf);
+    assertEquals(0, 
+        tool.run(new String[]{"-failover", "nn1", "nn2"}));
+    waitForHAState(0, HAServiceState.STANDBY);
+    waitForHAState(1, HAServiceState.ACTIVE);
+    assertEquals(0,
+        tool.run(new String[]{"-failover", "nn2", "nn1"}));
+    waitForHAState(0, HAServiceState.ACTIVE);
+    waitForHAState(1, HAServiceState.STANDBY);
+  }
+  
+  private void waitForHAState(int nnidx, final HAServiceState state)
+      throws TimeoutException, InterruptedException {
+    final NameNode nn = cluster.getNameNode(nnidx);
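+    // Poll the NN's reported HA service state every 50 ms, timing out
+    // after 5 seconds (the arguments to waitFor below).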
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        try {
+          return nn.getRpcServer().getServiceStatus().getState() == state;
+        } catch (Exception e) {
+          e.printStackTrace();
+          return false;
+        }
+      }
+    }, 50, 5000);
+  }
+
+  /**
+   * Test-thread which runs a ZK Failover Controller corresponding
+   * to a given NameNode in the minicluster.
+   */
+  private class ZKFCThread extends TestingThread {
+    private final DFSZKFailoverController zkfc;
+
+    public ZKFCThread(TestContext ctx, int idx) {
+      super(ctx);
+      this.zkfc = DFSZKFailoverController.create(
+          cluster.getConfiguration(idx));
+    }
+
+    @Override
+    public void doWork() throws Exception {
+      try {
+        assertEquals(0, zkfc.run(new String[0]));
+      } catch (InterruptedException ie) {
+        // Interrupted by main thread, that's OK.
+      }
+    }
+  }
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
index a245301..79dcec4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
@@ -71,7 +71,7 @@
       // Set the first NN to active, make sure it creates edits
       // in its own dirs and the shared dir. The standby
       // should still have no edits!
-      cluster.getNameNode(0).getRpcServer().transitionToActive();
+      cluster.transitionToActive(0);
       
       assertEditFiles(cluster.getNameDirs(0),
           NNStorage.getInProgressEditsFileName(1));
@@ -107,7 +107,7 @@
       // If we restart NN0, it'll come back as standby, and we can
       // transition NN1 to active and make sure it reads edits correctly at this point.
       cluster.restartNameNode(0);
-      cluster.getNameNode(1).getRpcServer().transitionToActive();
+      cluster.transitionToActive(1);
 
       // NN1 should have both the edits that came before its restart, and the edits that
       // came after its restart.
@@ -134,7 +134,7 @@
           NNStorage.getInProgressEditsFileName(1));
 
       // Transition one of the NNs to active
-      cluster.getNameNode(0).getRpcServer().transitionToActive();
+      cluster.transitionToActive(0);
       
       // In the transition to active, it should have read the log -- and
       // hence see one of the dirs we made in the fake log.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
index 7bc2d8e..adcdc6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
@@ -23,6 +23,7 @@
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
@@ -47,6 +48,7 @@
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
+import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.junit.After;
@@ -256,16 +258,21 @@
     // Shutdown the active NN.
     cluster.shutdownNameNode(0);
     
+    Runtime mockRuntime = mock(Runtime.class);
+    cluster.getNameNode(1).setRuntimeForTesting(mockRuntime);
+    verify(mockRuntime, times(0)).exit(anyInt());
     try {
       // Transition the standby to active.
       cluster.transitionToActive(1);
       fail("Standby transitioned to active, but should not have been able to");
     } catch (ServiceFailedException sfe) {
-      LOG.info("got expected exception: " + sfe.toString(), sfe);
+      Throwable sfeCause = sfe.getCause();
+      LOG.info("got expected exception: " + sfeCause.toString(), sfeCause);
       assertTrue("Standby failed to catch up for some reason other than "
-          + "failure to read logs", sfe.toString().contains(
+          + "failure to read logs", sfeCause.getCause().toString().contains(
               EditLogInputException.class.getName()));
     }
+    verify(mockRuntime, times(1)).exit(anyInt());
   }
   
   private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
@@ -273,7 +280,7 @@
         .getEditLog());
     LimitedEditLogAnswer answer = new LimitedEditLogAnswer(); 
     doAnswer(answer).when(spyEditLog).selectInputStreams(
-        anyLong(), anyLong(), anyBoolean());
+        anyLong(), anyLong(), (MetaRecoveryContext)anyObject(), anyBoolean());
     nn1.getNamesystem().getEditLogTailer().setEditLog(spyEditLog);
     
     return answer;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
index 9cd6ab7..abd7c72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
@@ -57,7 +57,7 @@
 
   private Configuration getHAConf(String nsId, String host1, String host2) {
     Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nsId);    
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsId);    
     conf.set(DFSUtil.addKeySuffixes(
         DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, nsId),
         "nn1,nn2");    
@@ -72,10 +72,10 @@
   }
 
   @Test
-  public void testGetOtherNNHttpAddress() {
+  public void testGetOtherNNHttpAddress() throws IOException {
     // Use non-local addresses to avoid host address matching
     Configuration conf = getHAConf("ns1", "1.2.3.1", "1.2.3.2");
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "ns1");
+    conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "ns1");
 
     // This is done by the NN before the StandbyCheckpointer is created
     NameNode.initializeGenericKeys(conf, "ns1", "nn1");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
index 8790d0f..5af5391 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
@@ -34,6 +34,8 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -129,7 +131,8 @@
     DFSTestUtil
       .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
     restartActive();
-    nn0.getRpcServer().transitionToActive();
+    nn0.getRpcServer().transitionToActive(
+        new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
 
     FSNamesystem namesystem = nn0.getNamesystem();
     String status = namesystem.getSafemode();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
index 092bb5a..e44ebc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
@@ -37,6 +37,8 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -71,6 +73,8 @@
   private static final String TEST_FILE_STR = TEST_FILE_PATH.toUri().getPath();
   private static final String TEST_FILE_DATA =
     "Hello state transitioning world";
+  private static final StateChangeRequestInfo REQ_INFO = new StateChangeRequestInfo(
+      RequestSource.REQUEST_BY_USER_FORCED);
   
   static {
     ((Log4JLogger)EditLogTailer.LOG).getLogger().setLevel(Level.ALL);
@@ -481,19 +485,19 @@
       assertFalse(isDTRunning(nn));
   
       banner("Transition 1->3. Should not start secret manager.");
-      nn.getRpcServer().transitionToActive();
+      nn.getRpcServer().transitionToActive(REQ_INFO);
       assertFalse(nn.isStandbyState());
       assertTrue(nn.isInSafeMode());
       assertFalse(isDTRunning(nn));
   
       banner("Transition 3->1. Should not start secret manager.");
-      nn.getRpcServer().transitionToStandby();
+      nn.getRpcServer().transitionToStandby(REQ_INFO);
       assertTrue(nn.isStandbyState());
       assertTrue(nn.isInSafeMode());
       assertFalse(isDTRunning(nn));
   
       banner("Transition 1->3->4. Should start secret manager.");
-      nn.getRpcServer().transitionToActive();
+      nn.getRpcServer().transitionToActive(REQ_INFO);
       NameNodeAdapter.leaveSafeMode(nn, false);
       assertFalse(nn.isStandbyState());
       assertFalse(nn.isInSafeMode());
@@ -514,13 +518,13 @@
       for (int i = 0; i < 20; i++) {
         // Loop the last check to suss out races.
         banner("Transition 4->2. Should stop secret manager.");
-        nn.getRpcServer().transitionToStandby();
+        nn.getRpcServer().transitionToStandby(REQ_INFO);
         assertTrue(nn.isStandbyState());
         assertFalse(nn.isInSafeMode());
         assertFalse(isDTRunning(nn));
     
         banner("Transition 2->4. Should start secret manager");
-        nn.getRpcServer().transitionToActive();
+        nn.getRpcServer().transitionToActive(REQ_INFO);
         assertFalse(nn.isStandbyState());
         assertFalse(nn.isInSafeMode());
         assertTrue(isDTRunning(nn));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
index b976a9c..7bc49f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
@@ -27,6 +27,8 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -111,7 +113,8 @@
     cluster.restartNameNode(1, true);
     
     // Make sure HA is working.
-    cluster.getNameNode(0).getRpcServer().transitionToActive();
+    cluster.getNameNode(0).getRpcServer().transitionToActive(
+        new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
     FileSystem fs = null;
     try {
       Path newPath = new Path(TEST_PATH, pathSuffix);
@@ -160,7 +163,7 @@
   @Test
   public void testInitializeSharedEditsConfiguresGenericConfKeys() {
     Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
         "ns1"), "nn1,nn2");
     conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
index ab2a8dd..e5b53ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
@@ -22,6 +22,8 @@
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index 815be59..a1e8f29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -85,12 +85,52 @@
   
   private static final int STRESS_NUM_THREADS = 25;
   private static final int STRESS_RUNTIME = 40000;
+  
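+  /** The styles of failover exercised by the write-pipeline tests. */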
+  enum TestScenario {
+    GRACEFUL_FAILOVER {
+      void run(MiniDFSCluster cluster) throws IOException {
+        cluster.transitionToStandby(0);
+        cluster.transitionToActive(1);
+      }
+    },
+    ORIGINAL_ACTIVE_CRASHED {
+      void run(MiniDFSCluster cluster) throws IOException {
+        cluster.restartNameNode(0);
+        cluster.transitionToActive(1);
+      }
+    };
+
+    abstract void run(MiniDFSCluster cluster) throws IOException;
+  }
+  
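+  /** Which NameNode RPC is tested for idempotence across the failover. */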
+  enum MethodToTestIdempotence {
+    ALLOCATE_BLOCK,
+    COMPLETE_FILE;
+  }
 
   /**
    * Tests continuing a write pipeline over a failover.
    */
   @Test(timeout=30000)
-  public void testWriteOverFailover() throws Exception {
+  public void testWriteOverGracefulFailover() throws Exception {
+    doWriteOverFailoverTest(TestScenario.GRACEFUL_FAILOVER,
+        MethodToTestIdempotence.ALLOCATE_BLOCK);
+  }
+  
+  @Test(timeout=30000)
+  public void testAllocateBlockAfterCrashFailover() throws Exception {
+    doWriteOverFailoverTest(TestScenario.ORIGINAL_ACTIVE_CRASHED,
+        MethodToTestIdempotence.ALLOCATE_BLOCK);
+  }
+
+  @Test(timeout=30000)
+  public void testCompleteFileAfterCrashFailover() throws Exception {
+    doWriteOverFailoverTest(TestScenario.ORIGINAL_ACTIVE_CRASHED,
+        MethodToTestIdempotence.COMPLETE_FILE);
+  }
+  
+  private void doWriteOverFailoverTest(TestScenario scenario,
+      MethodToTestIdempotence methodToTest) throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     // Don't check replication periodically.
@@ -102,6 +142,8 @@
       .numDataNodes(3)
       .build();
     try {
+      int sizeWritten = 0;
+      
       cluster.waitActive();
       cluster.transitionToActive(0);
       Thread.sleep(500);
@@ -112,28 +154,39 @@
       
       // write a block and a half
       AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
+      sizeWritten += BLOCK_AND_A_HALF;
       
       // Make sure all of the blocks are written out before failover.
       stm.hflush();
 
       LOG.info("Failing over to NN 1");
-      cluster.transitionToStandby(0);
-      cluster.transitionToActive(1);
+      scenario.run(cluster);
 
-      assertTrue(fs.exists(TEST_PATH));
+      // NOTE: explicitly do *not* make any further metadata calls
+      // to the NN here. The next IPC call should be to allocate the next
+      // block. Any other call would notice the failover and not test
+      // idempotence of the operation (HDFS-3031)
+      
       FSNamesystem ns1 = cluster.getNameNode(1).getNamesystem();
       BlockManagerTestUtil.updateState(ns1.getBlockManager());
       assertEquals(0, ns1.getPendingReplicationBlocks());
       assertEquals(0, ns1.getCorruptReplicaBlocks());
       assertEquals(0, ns1.getMissingBlocksCount());
 
-      // write another block and a half
-      AppendTestUtil.write(stm, BLOCK_AND_A_HALF, BLOCK_AND_A_HALF);
-
+      // If we're testing allocateBlock()'s idempotence, write another
+      // block and a half, so we have to allocate a new block.
+      // Otherwise, don't write anything, so our next RPC will be
+      // completeFile() if we're testing idempotence of that operation.
+      if (methodToTest == MethodToTestIdempotence.ALLOCATE_BLOCK) {
+        // write another block and a half
+        AppendTestUtil.write(stm, sizeWritten, BLOCK_AND_A_HALF);
+        sizeWritten += BLOCK_AND_A_HALF;
+      }
+      
       stm.close();
       stm = null;
       
-      AppendTestUtil.check(fs, TEST_PATH, BLOCK_SIZE * 3);
+      AppendTestUtil.check(fs, TEST_PATH, sizeWritten);
     } finally {
       IOUtils.closeStream(stm);
       cluster.shutdown();
@@ -146,7 +199,18 @@
    * even when the pipeline was constructed on a different NN.
    */
   @Test(timeout=30000)
-  public void testWriteOverFailoverWithDnFail() throws Exception {
+  public void testWriteOverGracefulFailoverWithDnFail() throws Exception {
+    doTestWriteOverFailoverWithDnFail(TestScenario.GRACEFUL_FAILOVER);
+  }
+  
+  @Test(timeout=30000)
+  public void testWriteOverCrashFailoverWithDnFail() throws Exception {
+    doTestWriteOverFailoverWithDnFail(TestScenario.ORIGINAL_ACTIVE_CRASHED);
+  }
+
+  
+  private void doTestWriteOverFailoverWithDnFail(TestScenario scenario)
+      throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     
@@ -171,8 +235,7 @@
       stm.hflush();
 
       LOG.info("Failing over to NN 1");
-      cluster.transitionToStandby(0);
-      cluster.transitionToActive(1);
+      scenario.run(cluster);
 
       assertTrue(fs.exists(TEST_PATH));
       
@@ -183,8 +246,8 @@
       stm.hflush();
       
       LOG.info("Failing back to NN 0");
-      cluster.transitionToStandby(0);
-      cluster.transitionToActive(1);
+      cluster.transitionToStandby(1);
+      cluster.transitionToActive(0);
       
       cluster.stopDataNode(1);
       
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 5440c38c..3fa8910 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -21,6 +21,7 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.net.URI;
 import java.util.List;
 
@@ -36,6 +37,11 @@
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.util.Canceler;
+import org.apache.hadoop.io.compress.CompressionCodecFactory;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.GzipCodec;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -48,16 +54,22 @@
 
 public class TestStandbyCheckpoints {
   private static final int NUM_DIRS_IN_LOG = 200000;
-  private MiniDFSCluster cluster;
-  private NameNode nn0, nn1;
-  private FileSystem fs;
+  protected MiniDFSCluster cluster;
+  protected NameNode nn0, nn1;
+  protected FileSystem fs;
 
+  @SuppressWarnings("rawtypes")
   @Before
   public void setupCluster() throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
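+    // Compress the image with the test-only SlowCodec defined below, so
+    // that saving a checkpoint takes long enough to be interrupted.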
+    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
+    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
+        SlowCodec.class.getCanonicalName());
+    CompressionCodecFactory.setCodecClasses(conf,
+        ImmutableList.<Class>of(SlowCodec.class));
 
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
@@ -159,14 +171,15 @@
     
     // We should make exactly one checkpoint at this new txid. 
     Mockito.verify(spyImage1, Mockito.times(1))
-      .saveNamespace((FSNamesystem) Mockito.anyObject());       
+      .saveNamespace((FSNamesystem) Mockito.anyObject(),
+          (Canceler)Mockito.anyObject());       
   }
   
   /**
    * Test cancellation of ongoing checkpoints when failover happens
    * mid-checkpoint. 
    */
-  @Test
+  @Test(timeout=120000)
   public void testCheckpointCancellation() throws Exception {
     cluster.transitionToStandby(0);
     
@@ -191,16 +204,18 @@
 
     cluster.transitionToActive(0);    
     
-    for (int i = 0; i < 10; i++) {
+    boolean canceledOne = false;
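+    // Fail over back and forth until at least one in-progress checkpoint
+    // has been observed to be canceled.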
+    for (int i = 0; i < 10 && !canceledOne; i++) {
       
       doEdits(i*10, i*10 + 10);
       cluster.transitionToStandby(0);
       cluster.transitionToActive(1);
       cluster.transitionToStandby(1);
       cluster.transitionToActive(0);
+      canceledOne = StandbyCheckpointer.getCanceledCount() > 0;
     }
     
-    assertTrue(StandbyCheckpointer.getCanceledCount() > 0);
+    assertTrue(canceledOne);
   }
 
   private void doEdits(int start, int stop) throws IOException {
@@ -209,5 +224,22 @@
       fs.mkdirs(p);
     }
   }
+  
+  /**
+   * A codec which just slows down the saving of the image significantly
+   * by sleeping a few milliseconds on every write. This makes it easy to
+   * catch the standby in the middle of saving a checkpoint.
+   */
+  public static class SlowCodec extends GzipCodec {
+    @Override
+    public CompressionOutputStream createOutputStream(OutputStream out)
+        throws IOException {
+      CompressionOutputStream ret = super.createOutputStream(out);
+      CompressionOutputStream spy = Mockito.spy(ret);
+      Mockito.doAnswer(new GenericTestUtils.SleepAnswer(2))
+        .when(spy).write(Mockito.<byte[]>any(), Mockito.anyInt(), Mockito.anyInt());
+      return spy;
+    }
+  }
 
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
index d7f2ff9..169978e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
@@ -42,7 +42,6 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -70,7 +69,7 @@
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
   }
 
-  @Test
+  @Test(timeout=60000)
   public void testStandbyIsHot() throws Exception {
     Configuration conf = new Configuration();
     // We read from the standby to watch block locations
@@ -111,6 +110,8 @@
       // Change replication
       LOG.info("Changing replication to 1");
       fs.setReplication(TEST_FILE_PATH, (short)1);
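+      // Compute replication work immediately rather than waiting for the
+      // periodic replication scan.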
+      BlockManagerTestUtil.computeAllPendingWork(
+          nn1.getNamesystem().getBlockManager());
       waitForBlockLocations(cluster, nn1, TEST_FILE, 1);
 
       nn1.getRpcServer().rollEditLog();
@@ -121,6 +122,8 @@
       // Change back to 3
       LOG.info("Changing replication to 3");
       fs.setReplication(TEST_FILE_PATH, (short)3);
+      BlockManagerTestUtil.computeAllPendingWork(
+          nn1.getNamesystem().getBlockManager());
       nn1.getRpcServer().rollEditLog();
       
       LOG.info("Waiting for higher replication to show up on standby");
@@ -142,7 +145,7 @@
    * In the bug, the standby node would only very slowly notice the blocks returning
    * to the cluster.
    */
-  @Test
+  @Test(timeout=60000)
   public void testDatanodeRestarts() throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
@@ -224,17 +227,16 @@
           
           LOG.info("Got " + numReplicas + " locs: " + locs);
           if (numReplicas > expectedReplicas) {
-            for (DataNode dn : cluster.getDataNodes()) {
-              DataNodeTestUtils.triggerDeletionReport(dn);
-            }
+            cluster.triggerDeletionReports();
           }
+          cluster.triggerHeartbeats();
           return numReplicas == expectedReplicas;
         } catch (IOException e) {
           LOG.warn("No block locations yet: " + e.getMessage());
           return false;
         }
       }
-    }, 500, 10000);
+    }, 500, 20000);
     
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java
new file mode 100644
index 0000000..3af78a5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.junit.Test;
+
+/**
+ * Tests to verify the behavior when the NameNode fails to fully transition between HA states.
+ */
+public class TestStateTransitionFailure {
+  
+  public static final Log LOG = LogFactory.getLog(TestStateTransitionFailure.class);
+
+  /**
+   * Ensure that a failure to fully transition to the active state causes a
+   * shutdown of the NameNode.
+   */
+  @Test
+  public void testFailureToTransitionCausesShutdown() throws IOException {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      // Set an illegal value for the trash emptier interval. This will cause
+      // the NN to fail to transition to the active state.
+      conf.setLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, -1);
+      cluster = new MiniDFSCluster.Builder(conf)
+          .nnTopology(MiniDFSNNTopology.simpleHATopology())
+          .numDataNodes(0)
+          .build();
+      cluster.waitActive();
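+      // Swap in a mock Runtime so the NN's forced shutdown can be
+      // verified without actually exiting the test JVM.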
+      Runtime mockRuntime = mock(Runtime.class);
+      cluster.getNameNode(0).setRuntimeForTesting(mockRuntime);
+      verify(mockRuntime, times(0)).exit(anyInt());
+      try {
+        cluster.transitionToActive(0);
+        fail("Transitioned to active but should not have been able to.");
+      } catch (ServiceFailedException sfe) {
+        assertExceptionContains("Error encountered requiring NN shutdown. " +
+            "Shutting down immediately.", sfe.getCause());
+        LOG.info("got expected exception", sfe.getCause());
+      }
+      verify(mockRuntime, times(1)).exit(anyInt());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index 4c4d0f2..71dcce4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -20,6 +20,7 @@
 
 import static org.junit.Assert.*;
 
+import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
@@ -32,14 +33,17 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HAServiceTarget;
 import org.apache.hadoop.ha.HealthCheckFailedException;
-import org.apache.hadoop.ha.NodeFencer;
+import org.apache.hadoop.ha.ZKFCProtocol;
 import org.apache.hadoop.test.MockitoUtil;
 
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
 
 import com.google.common.base.Charsets;
@@ -52,6 +56,7 @@
   private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
   private String errOutput;
   private HAServiceProtocol mockProtocol;
+  private ZKFCProtocol mockZkfcProtocol;
   
   private static final String NSID = "ns1";
 
@@ -59,13 +64,16 @@
     new HAServiceStatus(HAServiceState.STANDBY)
     .setReadyToBecomeActive();
   
+  private ArgumentCaptor<StateChangeRequestInfo> reqInfoCaptor =
+    ArgumentCaptor.forClass(StateChangeRequestInfo.class);
+  
   private static String HOST_A = "1.2.3.1";
   private static String HOST_B = "1.2.3.2";
 
   private HdfsConfiguration getHAConf() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, NSID);    
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, NSID);
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID);    
+    conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, NSID);
     conf.set(DFSUtil.addKeySuffixes(
         DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, NSID), "nn1,nn2");    
     conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
@@ -81,6 +89,7 @@
   @Before
   public void setup() throws IOException {
     mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class);
+    mockZkfcProtocol = MockitoUtil.mockProtocol(ZKFCProtocol.class);
     tool = new DFSHAAdmin() {
 
       @Override
@@ -90,7 +99,9 @@
         // Override the target to return our mock protocol
         try {
           Mockito.doReturn(mockProtocol).when(spy).getProxy(
-              Mockito.<Configuration>any(), Mockito.anyInt()); 
+              Mockito.<Configuration>any(), Mockito.anyInt());
+          Mockito.doReturn(mockZkfcProtocol).when(spy).getZKFCProxy(
+              Mockito.<Configuration>any(), Mockito.anyInt());
         } catch (IOException e) {
           throw new AssertionError(e); // mock setup doesn't really throw
         }
@@ -139,13 +150,89 @@
   @Test
   public void testTransitionToActive() throws Exception {
     assertEquals(0, runTool("-transitionToActive", "nn1"));
-    Mockito.verify(mockProtocol).transitionToActive();
+    Mockito.verify(mockProtocol).transitionToActive(
+        reqInfoCaptor.capture());
+    assertEquals(RequestSource.REQUEST_BY_USER,
+        reqInfoCaptor.getValue().getSource());
+  }
+  
+  /**
+   * Test that, if automatic HA is enabled, none of the mutative operations
+   * will succeed, unless the -forcemanual flag is specified.
+   * @throws Exception
+   */
+  @Test
+  public void testMutativeOperationsWithAutoHaEnabled() throws Exception {
+    Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+    
+    // Turn on auto-HA in the config
+    HdfsConfiguration conf = getHAConf();
+    conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    tool.setConf(conf);
+
+    // Should fail without the forcemanual flag
+    assertEquals(-1, runTool("-transitionToActive", "nn1"));
+    assertTrue(errOutput.contains("Refusing to manually manage"));
+    assertEquals(-1, runTool("-transitionToStandby", "nn1"));
+    assertTrue(errOutput.contains("Refusing to manually manage"));
+
+    Mockito.verify(mockProtocol, Mockito.never())
+      .transitionToActive(anyReqInfo());
+    Mockito.verify(mockProtocol, Mockito.never())
+      .transitionToStandby(anyReqInfo());
+
+    // Force flag should bypass the check and change the request source
+    // for the RPC
+    setupConfirmationOnSystemIn();
+    assertEquals(0, runTool("-transitionToActive", "-forcemanual", "nn1"));
+    setupConfirmationOnSystemIn();
+    assertEquals(0, runTool("-transitionToStandby", "-forcemanual", "nn1"));
+
+    Mockito.verify(mockProtocol, Mockito.times(1)).transitionToActive(
+        reqInfoCaptor.capture());
+    Mockito.verify(mockProtocol, Mockito.times(1)).transitionToStandby(
+        reqInfoCaptor.capture());
+    
+    // All of the RPCs should have had the "force" source
+    for (StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) {
+      assertEquals(RequestSource.REQUEST_BY_USER_FORCED, ri.getSource());
+    }
+  }
+
+  /**
+   * Setup System.in with a stream that feeds a "yes" answer on the
+   * next prompt.
+   */
+  private static void setupConfirmationOnSystemIn() {
+   // Answer "yes" to the prompt about transition to active
+   System.setIn(new ByteArrayInputStream("yes\n".getBytes()));
+  }
+
+  /**
+   * Test that, even if automatic HA is enabled, the monitoring operations
+   * still function correctly.
+   */
+  @Test
+  public void testMonitoringOperationsWithAutoHaEnabled() throws Exception {
+    Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+
+    // Turn on auto-HA
+    HdfsConfiguration conf = getHAConf();
+    conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
+    tool.setConf(conf);
+
+    assertEquals(0, runTool("-checkHealth", "nn1"));
+    Mockito.verify(mockProtocol).monitorHealth();
+    
+    assertEquals(0, runTool("-getServiceState", "nn1"));
+    Mockito.verify(mockProtocol).getServiceStatus();
   }
 
   @Test
   public void testTransitionToStandby() throws Exception {
     assertEquals(0, runTool("-transitionToStandby", "nn1"));
-    Mockito.verify(mockProtocol).transitionToStandby();
+    Mockito.verify(mockProtocol).transitionToStandby(anyReqInfo());
   }
 
   @Test
@@ -213,6 +300,19 @@
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
   }
+  
+  @Test
+  public void testFailoverWithAutoHa() throws Exception {
+    Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+    // Turn on auto-HA in the config
+    HdfsConfiguration conf = getHAConf();
+    conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+    tool.setConf(conf);
+
+    assertEquals(0, runTool("-failover", "nn1", "nn2"));
+    Mockito.verify(mockZkfcProtocol).gracefulFailover();
+  }
 
   @Test
   public void testForceFenceOptionListedBeforeArgs() throws Exception {
@@ -283,4 +383,8 @@
     LOG.info("Output:\n" + errOutput);
     return ret;
   }
+  
+  private StateChangeRequestInfo anyReqInfo() {
+    return Mockito.<StateChangeRequestInfo>any();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
index 93de1d2..d55a258 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
@@ -61,7 +61,7 @@
       }
       nsList.append(getNameServiceId(i));
     }
-    conf.set(DFS_FEDERATION_NAMESERVICES, nsList.toString());
+    conf.set(DFS_NAMESERVICES, nsList.toString());
   }
 
   /** Set a given key with value as address, for all the nameServiceIds.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 3c6adc2..21b216d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -18,6 +18,10 @@
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
 import java.io.BufferedReader;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.security.token.Token;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
@@ -29,15 +33,19 @@
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Set;
 
-import junit.framework.TestCase;
+import org.junit.*;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
@@ -52,12 +60,15 @@
  *   * confirm it correctly bails on malformed image files, in particular, a
  *     file that ends suddenly.
  */
-public class TestOfflineImageViewer extends TestCase {
+public class TestOfflineImageViewer {
+  private static final Log LOG = LogFactory.getLog(OfflineImageViewer.class);
   private static final int NUM_DIRS = 3;
   private static final int FILES_PER_DIR = 4;
+  private static final String TEST_RENEWER = "JobTracker";
+  private static File originalFsimage = null;
 
   // Elements of lines of ls-file output to be compared to FileStatus instance
-  private class LsElements {
+  private static class LsElements {
     public String perms;
     public int replication;
     public String username;
@@ -67,43 +78,28 @@
   }
   
   // namespace as written to dfs, to be compared with viewer's output
-  final HashMap<String, FileStatus> writtenFiles 
-                                           = new HashMap<String, FileStatus>();
-  
+  final static HashMap<String, FileStatus> writtenFiles = 
+      new HashMap<String, FileStatus>();
   
   private static String ROOT = System.getProperty("test.build.data",
                                                   "build/test/data");
   
-  // Main entry point into testing.  Necessary since we only want to generate
-  // the fsimage file once and use it for multiple tests. 
-  public void testOIV() throws Exception {
-    File originalFsimage = null;
-    try {
-    originalFsimage = initFsimage();
-    assertNotNull("originalFsImage shouldn't be null", originalFsimage);
-    
-    // Tests:
-    outputOfLSVisitor(originalFsimage);
-    outputOfFileDistributionVisitor(originalFsimage);
-    
-    unsupportedFSLayoutVersion(originalFsimage);
-    
-    truncatedFSImage(originalFsimage);
-    
-    } finally {
-      if(originalFsimage != null && originalFsimage.exists())
-        originalFsimage.delete();
-    }
-  }
-
   // Create a populated namespace for later testing.  Save its contents to a
   // data structure and store its fsimage location.
-  private File initFsimage() throws IOException {
+  // We only want to generate the fsimage file once and use it for
+  // multiple tests.
+  @BeforeClass
+  public static void createOriginalFSImage() throws IOException {
     MiniDFSCluster cluster = null;
-    File orig = null;
     try {
       Configuration conf = new HdfsConfiguration();
+      conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
+      conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
+      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+      conf.set("hadoop.security.auth_to_local",
+          "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+      cluster.waitActive();
       FileSystem hdfs = cluster.getFileSystem();
       
       int filesize = 256;
@@ -123,34 +119,49 @@
         }
       }
 
+      // Get delegation tokens so we log the delegation token op
+      List<Token<?>> delegationTokens = 
+          hdfs.getDelegationTokens(TEST_RENEWER);
+      for (Token<?> t : delegationTokens) {
+        LOG.debug("got token " + t);
+      }
+
       // Write results to the fsimage file
       cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
       cluster.getNameNodeRpc().saveNamespace();
       
       // Determine location of fsimage file
-      orig = FSImageTestUtil.findLatestImageFile(
+      originalFsimage = FSImageTestUtil.findLatestImageFile(
           FSImageTestUtil.getFSImage(
           cluster.getNameNode()).getStorage().getStorageDir(0));
-      if (orig == null) {
-        fail("Didn't generate or can't find fsimage");
+      if (originalFsimage == null) {
+        throw new RuntimeException("Didn't generate or can't find fsimage");
       }
+      LOG.debug("original FS image file is " + originalFsimage);
     } finally {
       if(cluster != null)
         cluster.shutdown();
     }
-    return orig;
+  }
+  
+  @AfterClass
+  public static void deleteOriginalFSImage() throws IOException {
+    if(originalFsimage != null && originalFsimage.exists()) {
+      originalFsimage.delete();
+    }
   }
   
   // Convenience method to generate a file status from file system for 
   // later comparison
-  private FileStatus pathToFileEntry(FileSystem hdfs, String file) 
+  private static FileStatus pathToFileEntry(FileSystem hdfs, String file) 
         throws IOException {
     return hdfs.getFileStatus(new Path(file));
   }
-
+  
   // Verify that we can correctly generate an ls-style output for a valid 
   // fsimage
-  private void outputOfLSVisitor(File originalFsimage) throws IOException {
+  @Test
+  public void outputOfLSVisitor() throws IOException {
     File testFile = new File(ROOT, "/basicCheck");
     File outputFile = new File(ROOT, "/basicCheckOutput");
     
@@ -169,12 +180,13 @@
       if(testFile.exists()) testFile.delete();
       if(outputFile.exists()) outputFile.delete();
     }
-    System.out.println("Correctly generated ls-style output.");
+    LOG.debug("Correctly generated ls-style output.");
   }
   
   // Confirm that attempting to read an fsimage file with an unsupported
   // layout results in an error
-  public void unsupportedFSLayoutVersion(File originalFsimage) throws IOException {
+  @Test
+  public void unsupportedFSLayoutVersion() throws IOException {
     File testFile = new File(ROOT, "/invalidLayoutVersion");
     File outputFile = new File(ROOT, "invalidLayoutVersionOutput");
     
@@ -190,7 +202,7 @@
       } catch(IOException e) {
         if(!e.getMessage().contains(Integer.toString(badVersionNum)))
           throw e; // wasn't error we were expecting
-        System.out.println("Correctly failed at reading bad image version.");
+        LOG.debug("Correctly failed at reading bad image version.");
       }
     } finally {
       if(testFile.exists()) testFile.delete();
@@ -199,7 +211,8 @@
   }
   
   // Verify that image viewer will bail on a file that ends unexpectedly
-  private void truncatedFSImage(File originalFsimage) throws IOException {
+  @Test
+  public void truncatedFSImage() throws IOException {
     File testFile = new File(ROOT, "/truncatedFSImage");
     File outputFile = new File(ROOT, "/trucnatedFSImageOutput");
     try {
@@ -213,7 +226,7 @@
         oiv.go();
         fail("Managed to process a truncated fsimage file");
       } catch (EOFException e) {
-        System.out.println("Correctly handled EOF");
+        LOG.debug("Correctly handled EOF");
       }
 
     } finally {
@@ -365,7 +378,8 @@
     }
   }
 
-  private void outputOfFileDistributionVisitor(File originalFsimage) throws IOException {
+  @Test
+  public void outputOfFileDistributionVisitor() throws IOException {
     File testFile = new File(ROOT, "/basicCheck");
     File outputFile = new File(ROOT, "/fileDistributionCheckOutput");
 
@@ -392,4 +406,66 @@
     }
     assertEquals(totalFiles, NUM_DIRS * FILES_PER_DIR);
   }
+  
+  private static class TestImageVisitor extends ImageVisitor {
+    private List<String> delegationTokenRenewers = new LinkedList<String>();
+    TestImageVisitor() {
+    }
+    
+    List<String> getDelegationTokenRenewers() {
+      return delegationTokenRenewers;
+    }
+
+    @Override
+    void start() throws IOException {
+    }
+
+    @Override
+    void finish() throws IOException {
+    }
+
+    @Override
+    void finishAbnormally() throws IOException {
+    }
+
+    @Override
+    void visit(ImageElement element, String value) throws IOException {
+      if (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_RENEWER) {
+        delegationTokenRenewers.add(value);
+      }
+    }
+
+    @Override
+    void visitEnclosingElement(ImageElement element) throws IOException {
+    }
+
+    @Override
+    void visitEnclosingElement(ImageElement element, ImageElement key,
+        String value) throws IOException {
+    }
+
+    @Override
+    void leaveEnclosingElement() throws IOException {
+    }
+  }
+
+  @Test
+  public void outputOfTestVisitor() throws IOException {
+    File testFile = new File(ROOT, "/basicCheck");
+
+    try {
+      copyFile(originalFsimage, testFile);
+      TestImageVisitor v = new TestImageVisitor();
+      OfflineImageViewer oiv = new OfflineImageViewer(testFile.getPath(), v, true);
+      oiv.go();
+
+      // Validate stored delegation token identifiers.
+      List<String> dtrs = v.getDelegationTokenRenewers();
+      assertEquals(1, dtrs.size());
+      assertEquals(TEST_RENEWER, dtrs.get(0));
+    } finally {
+      if(testFile.exists()) testFile.delete();
+    }
+    LOG.debug("Passed TestVisitor validation.");
+  }
 }
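
The hunks above rework the OfflineImageViewer test from a single testOIV() driver into standard JUnit 4 structure: the fsimage is generated once in a static @BeforeClass method, each check becomes its own @Test, and @AfterClass removes the shared file. A minimal sketch of that shared-fixture pattern, using a temp file as a stand-in for the fsimage (class and method names here are illustrative, not part of the patch):

import java.io.File;
import java.io.IOException;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import static org.junit.Assert.assertTrue;

public class SharedFixtureExample {
  // Built once for the whole class, reused by every @Test method.
  private static File fixture;

  @BeforeClass
  public static void setUpOnce() throws IOException {
    // Expensive setup (here just a temp file) runs a single time.
    fixture = File.createTempFile("fixture", ".dat");
  }

  @Test
  public void firstCheck() {
    assertTrue(fixture.exists());
  }

  @Test
  public void secondCheck() {
    assertTrue(fixture.length() >= 0);
  }

  @AfterClass
  public static void tearDownOnce() {
    // Cleanup runs once after all tests, mirroring deleteOriginalFSImage().
    if (fixture != null && fixture.exists()) {
      fixture.delete();
    }
  }
}
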
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
index e890cae..1984136 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
@@ -421,5 +421,48 @@
 
     LOG.info("Test other - DONE");
   }
+  
+  @Test
+  public void testGetElement() {
+    LightWeightHashSet<TestObject> objSet = new LightWeightHashSet<TestObject>();
+    TestObject objA = new TestObject("object A");
+    TestObject equalToObjA = new TestObject("object A");
+    TestObject objB = new TestObject("object B");
+    objSet.add(objA);
+    objSet.add(objB);
+    
+    assertSame(objA, objSet.getElement(objA));
+    assertSame(objA, objSet.getElement(equalToObjA));
+    assertSame(objB, objSet.getElement(objB));
+    assertNull(objSet.getElement(new TestObject("not in set")));
+  }
+  
+  /**
+   * Wrapper class which is used in
+   * {@link TestLightWeightHashSet#testGetElement()}
+   */
+  private static class TestObject {
+    private final String value;
+
+    public TestObject(String value) {
+      super();
+      this.value = value;
+    }
+
+    @Override
+    public int hashCode() {
+      return value.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj) return true;
+      if (obj == null) return false;
+      if (getClass() != obj.getClass())
+        return false;
+      TestObject other = (TestObject) obj;
+      return this.value.equals(other.value);
+    }
+  }
 
 }
\ No newline at end of file
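
The new testGetElement() above pins down an interning-style contract for LightWeightHashSet: getElement(probe) returns the instance already stored in the set when an equal element exists, and null otherwise. A rough sketch of that contract over a plain HashMap, offered purely for illustration (this is not the LightWeightHashSet implementation):

import java.util.HashMap;
import java.util.Map;

public class InterningSetSketch<T> {
  // Key and value are the same reference, so a lookup can hand back the
  // instance that was originally stored.
  private final Map<T, T> elements = new HashMap<T, T>();

  public void add(T element) {
    elements.put(element, element);
  }

  public T getElement(T probe) {
    // The stored instance that equals() the probe, or null if absent.
    return elements.get(probe);
  }

  public static void main(String[] args) {
    InterningSetSketch<String> set = new InterningSetSketch<String>();
    String stored = new String("object A");
    set.add(stored);
    // An equal-but-distinct probe resolves to the originally stored instance,
    // matching the assertSame() expectations in the test above.
    System.out.println(set.getElement(new String("object A")) == stored); // true
  }
}
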
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 3823822..843f422 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -123,9 +123,10 @@
       checkPermission(fs, "/aa/1/aa/2/aa/3", dirPerm);
 
       FsPermission filePerm = new FsPermission((short)0444);
-      FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm,
+      Path p = new Path("/b1/b2/b3.txt");
+      FSDataOutputStream out = fs.create(p, filePerm,
           true, conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-          fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
+          fs.getDefaultReplication(p), fs.getDefaultBlockSize(p), null);
       out.write(123);
       out.close();
       checkPermission(fs, "/b1", inheritPerm);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml
index eb3f4bd..afbf420 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml
@@ -116,5 +116,11 @@
     <description>ACL for HAService protocol used by HAAdmin to manage the
       active and stand-by states of namenode.</description>
   </property>
+  <property>
+    <name>security.zkfc.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for access to the ZK Failover Controller
+    </description>
+  </property>
   
 </configuration>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 8f769ce..918c004 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -47,7 +47,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
       </comparators>
     </test>
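
Every expected-output edit in this file makes the same adjustment: the username field of the ls regexp is widened from [a-z]* to [a-zA-z0-9]*, so the tests still match when run under an account whose name contains capital letters or digits. A small, purely illustrative check of the widened pattern against a fabricated ls line (the sample user and timestamp are made up):

import java.util.regex.Pattern;

public class LsRegexSketch {
  public static void main(String[] args) {
    String expected = "^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*"
        + "[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1";
    String sample =
        "-rw-r--r--   1 Hudson2 supergroup          0 2012-06-05 12:34 /file1";
    // With the old [a-z]* username field this sample would not match;
    // the widened field accepts the mixed-case, digit-bearing user name.
    System.out.println(Pattern.compile(expected).matcher(sample).find()); // true
  }
}
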
@@ -68,7 +68,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -88,19 +88,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -121,7 +121,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -142,7 +142,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -162,19 +162,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir3</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir4</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -225,7 +225,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -246,7 +246,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -265,15 +265,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -292,15 +292,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -321,7 +321,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -342,7 +342,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -365,15 +365,15 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir3</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -396,15 +396,15 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir3</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///user/dir3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -469,63 +469,63 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -557,63 +557,63 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -646,64 +646,64 @@
         <!-- JIRA?
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^/user/[a-z]*/dir0/dir1</expected-output>
+          <expected-output>^/user/[a-zA-z0-9]*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^/user/[a-z]*/dir0/dir2</expected-output>
+          <expected-output>^/user/[a-zA-z0-9]*/dir0/dir2</expected-output>
         </comparator>
        -->
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir2/dir2/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -755,23 +755,23 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -791,11 +791,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -832,23 +832,23 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -868,11 +868,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -926,7 +926,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/\*/file</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/\*/file</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -947,11 +947,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1578,11 +1578,11 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file[^1]</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file[^1]</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1618,7 +1618,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1652,7 +1652,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1689,19 +1689,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1738,11 +1738,11 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file[^1]</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file[^1]</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1764,11 +1764,11 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file[^1]</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file[^1]</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1787,7 +1787,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1836,7 +1836,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1872,15 +1872,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1917,11 +1917,11 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file[^1]</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file[^1]</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1940,7 +1940,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1989,7 +1989,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2025,15 +2025,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2068,11 +2068,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir1/dir0</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir1/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir1/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir1/dir0/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2092,11 +2092,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir1/dir0</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir1/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir1/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir1/dir0/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2115,11 +2115,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2137,11 +2137,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2159,11 +2159,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2181,11 +2181,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2204,11 +2204,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2227,11 +2227,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2253,35 +2253,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2303,35 +2303,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2464,11 +2464,11 @@
       <comparators>
        <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
          <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2487,11 +2487,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2510,11 +2510,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2536,27 +2536,27 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2578,27 +2578,27 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2750,15 +2750,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir1/dir0</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir1/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir1/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir1/dir0/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2779,15 +2779,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir0</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2808,15 +2808,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir0</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dest/dir2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2834,11 +2834,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2857,11 +2857,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2880,11 +2880,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2906,27 +2906,27 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -2948,27 +2948,27 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -3085,15 +3085,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir1/dir0</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir1/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir1/dir0/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir1/dir0/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -3114,15 +3114,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir0</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -3143,15 +3143,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir0</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir2</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dest/dir2</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8418,7 +8418,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8440,7 +8440,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8462,19 +8462,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8496,19 +8496,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8530,7 +8530,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8552,7 +8552,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8574,19 +8574,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8608,19 +8608,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8642,19 +8642,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8676,19 +8676,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8710,19 +8710,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8744,19 +8744,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8841,19 +8841,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8874,19 +8874,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8913,35 +8913,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -8968,35 +8968,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9017,19 +9017,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9050,19 +9050,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9089,35 +9089,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9144,35 +9144,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9193,19 +9193,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9226,19 +9226,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9265,35 +9265,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9320,35 +9320,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9369,19 +9369,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9402,19 +9402,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9441,35 +9441,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9496,35 +9496,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9551,35 +9551,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9606,35 +9606,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9661,35 +9661,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9716,35 +9716,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9771,35 +9771,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9826,35 +9826,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9881,35 +9881,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -9936,35 +9936,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10054,7 +10054,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10076,19 +10076,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10110,7 +10110,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10132,19 +10132,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10166,19 +10166,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10200,19 +10200,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10265,19 +10265,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10304,35 +10304,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10353,19 +10353,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10392,35 +10392,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10441,19 +10441,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10480,35 +10480,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10529,19 +10529,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10568,35 +10568,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10623,35 +10623,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10678,35 +10678,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10733,35 +10733,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10788,35 +10788,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10873,7 +10873,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10895,19 +10895,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10929,7 +10929,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10951,19 +10951,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -10985,19 +10985,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11019,19 +11019,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11084,19 +11084,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11123,35 +11123,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11172,19 +11172,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11211,35 +11211,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11260,19 +11260,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11299,35 +11299,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11348,19 +11348,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11387,35 +11387,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11442,35 +11442,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11497,35 +11497,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11552,35 +11552,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11607,35 +11607,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11718,15 +11718,15 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11760,7 +11760,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -11836,35 +11836,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
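Each test above carries several RegexpComparator blocks; conceptually the test passes only if every expected-output pattern finds a match somewhere in the captured command output. A small self-contained Java sketch of that idea (an illustration under that assumption, not the harness's actual comparator code), using hypothetical recursive-listing output over hdfs:/// paths:

import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public class RegexpComparatorSketch {
  // True only if every expected pattern matches at least one line of output,
  // mirroring how a set of <expected-output> regexes is meant to be read.
  static boolean allPatternsMatch(List<String> expected, List<String> outputLines) {
    for (String regex : expected) {
      Pattern p = Pattern.compile(regex);
      if (outputLines.stream().noneMatch(l -> p.matcher(l).find())) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Hypothetical captured output of a recursive listing over hdfs:/// paths.
    List<String> output = Arrays.asList(
        "drwxr-xr-x   - testuser supergroup          0 2012-05-31 14:22 hdfs:///dir0/dir1",
        "-rw-r--r--   1 testuser supergroup          0 2012-05-31 14:22 hdfs:///dir0/dir1/file1");
    // Two of the expected-output patterns from the hunk above.
    List<String> expected = Arrays.asList(
        "^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1",
        "^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1");
    System.out.println(allPatternsMatch(expected, output));  // prints: true
  }
}

Keeping the date and time fields as loose [0-9]{...} ranges is what lets the same expected output hold regardless of when the test data was created.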
@@ -11924,35 +11924,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -12167,15 +12167,15 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -12201,15 +12201,15 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -12243,7 +12243,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -12277,7 +12277,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -12402,35 +12402,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -12457,35 +12457,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -12578,35 +12578,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -12633,35 +12633,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -12997,15 +12997,15 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13039,7 +13039,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13115,35 +13115,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13203,35 +13203,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13398,7 +13398,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13420,7 +13420,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13442,19 +13442,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13476,19 +13476,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
        </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13510,19 +13510,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13544,19 +13544,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13609,19 +13609,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13642,19 +13642,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13681,35 +13681,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13736,35 +13736,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13785,19 +13785,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13818,19 +13818,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13857,35 +13857,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13912,35 +13912,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -13967,35 +13967,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14022,35 +14022,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14077,35 +14077,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
         </comparator>
        <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14132,35 +14132,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14216,7 +14216,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14238,19 +14238,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14272,19 +14272,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14321,19 +14321,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14360,35 +14360,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14409,19 +14409,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14448,35 +14448,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14503,35 +14503,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-         <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+         <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14558,35 +14558,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*hdfs:///dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14626,7 +14626,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14648,19 +14648,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14682,19 +14682,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14731,19 +14731,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14770,35 +14770,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14819,19 +14819,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file3</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/file4</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14858,35 +14858,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -14913,35 +14913,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-         <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+         <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-         <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+         <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
    </test>
@@ -14968,35 +14968,35 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir0/file0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/dir1/file2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
+          <expected-output>^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/dir2/file0</expected-output>
         </comparator>
       </comparators>
     </test>
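
All of the hunks above make the same change to the CLI test expectations: the owner column of the ls-style listing, previously matched with [a-z]*, is now matched with a wider character class so the tests also pass when run as a user whose name contains capitals or digits. A minimal standalone sketch of how such a pattern behaves; the class name, the sample listing line, and the user name "hdfsUser1" are illustrative and not part of the patch:

import java.util.regex.Pattern;

// Hypothetical check mirroring the shape of the expected-output patterns above:
// permissions, replication, owner, group, size, date, time, path.
public class LsOwnerRegexCheck {
  public static void main(String[] args) {
    String expected = "^-rw-r--r--( )*1( )*[a-zA-Z0-9]*( )*supergroup( )*0"
        + "( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0";
    // Illustrative listing line as it might appear when the test user is "hdfsUser1".
    String actual =
        "-rw-r--r--   1 hdfsUser1 supergroup          0 2012-05-31 14:22 /dir0/file0";
    // Prints true: the widened owner class consumes "hdfsUser1"; with the old
    // [a-z]* class the digits and capitals in the owner name would have stopped
    // the match before the group column.
    System.out.println(Pattern.compile(expected).matcher(actual).find());
  }
}
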
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/HDFSPolicyProviderAspect.aj b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/HDFSPolicyProviderAspect.aj
deleted file mode 100644
index 4738c25..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/HDFSPolicyProviderAspect.aj
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.util.ArrayList;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.test.system.DaemonProtocol;
-import org.apache.hadoop.hdfs.test.system.DNProtocol;
-import org.apache.hadoop.hdfs.test.system.NNProtocol;
-import org.apache.hadoop.security.authorize.Service;
-import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-
-/**
- * This aspect adds two HDFS Herriot-specific protocols to the list of 'authorized'
- * Herriot protocols.
- * Protocol descriptors, i.e. 'security.nn.protocol.acl', have to be added to
- * <code>hadoop-policy.xml</code> if present.
- */
-public privileged aspect HDFSPolicyProviderAspect {
-  private static final Log LOG = LogFactory
-      .getLog(HDFSPolicyProviderAspect.class);
-
-  ArrayList<Service> herriotHDFSServices = null;
-
-  pointcut updateHDFSServices() :
-    execution (public Service[] HDFSPolicyProvider.getServices());
-
-  Service[] around() : updateHDFSServices () {
-    herriotHDFSServices = new ArrayList<Service>();
-    for (Service s : HDFSPolicyProvider.hdfsServices) {
-      LOG.debug("Copying configured protocol to "
-          + s.getProtocol().getCanonicalName());
-      herriotHDFSServices.add(s);
-    }
-    herriotHDFSServices.add(new Service("security.daemon.protocol.acl",
-        DaemonProtocol.class));
-    herriotHDFSServices.add(new Service("security.nn.protocol.acl",
-        NNProtocol.class));
-    herriotHDFSServices.add(new Service("security.dn.protocol.acl",
-        DNProtocol.class));
-    final Service[] retArray = herriotHDFSServices
-        .toArray(new Service[herriotHDFSServices.size()]);
-    LOG.debug("Number of configured protocols to return: " + retArray.length);
-    return retArray;
-  }
-}
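
For readers unfamiliar with AspectJ, the around() advice in the file removed above effectively rebuilt the service list returned by HDFSPolicyProvider.getServices(). A rough plain-Java sketch of the same wiring follows; the helper class name and the way the stock services are passed in are assumptions for illustration, since the real aspect relied on privileged access to HDFSPolicyProvider.hdfsServices:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.test.system.DNProtocol;
import org.apache.hadoop.hdfs.test.system.NNProtocol;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.test.system.DaemonProtocol;

// Hypothetical helper showing what the deleted advice returned.
public class HerriotHdfsServiceList {
  public static Service[] withHerriotProtocols(Service[] configured) {
    List<Service> services = new ArrayList<Service>();
    for (Service s : configured) {
      services.add(s);  // keep the stock HDFS ACL entries
    }
    // Herriot-specific protocols the system tests must be authorized to call.
    services.add(new Service("security.daemon.protocol.acl", DaemonProtocol.class));
    services.add(new Service("security.nn.protocol.acl", NNProtocol.class));
    services.add(new Service("security.dn.protocol.acl", DNProtocol.class));
    return services.toArray(new Service[services.size()]);
  }
}
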
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/datanode/DataNodeAspect.aj b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/datanode/DataNodeAspect.aj
deleted file mode 100644
index e2f3ec3..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/datanode/DataNodeAspect.aj
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.AbstractList;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.test.system.DNProtocol;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.system.DaemonProtocol;
-import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
-
-public privileged aspect DataNodeAspect {
-  declare parents : DataNode implements DNProtocol;
-
-  public Configuration DataNode.getDaemonConf() {
-    return super.getConf();
-  }
-
-  pointcut dnConstructorPointcut(Configuration conf, AbstractList<File> dirs,
-      SecureResources resources) :
-    call(DataNode.new(Configuration, AbstractList<File>, SecureResources))
-    && args(conf, dirs, resources);
-
-  after(Configuration conf, AbstractList<File> dirs, SecureResources resources)
-    returning (DataNode datanode):
-    dnConstructorPointcut(conf, dirs, resources) {
-    try {
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      datanode.setUser(ugi.getShortUserName());
-    } catch (IOException e) {
-      datanode.LOG.warn("Unable to get the user information for the " +
-          "DataNode");
-    }
-    datanode.setReady(true);
-  }
-
-  pointcut getVersionAspect(String protocol, long clientVersion) :
-    execution(public long DataNode.getProtocolVersion(String ,
-      long) throws IOException) && args(protocol, clientVersion);
-
-  long around(String protocol, long clientVersion) :
-    getVersionAspect(protocol, clientVersion) {
-    if(protocol.equals(DaemonProtocol.class.getName())) {
-      return DaemonProtocol.versionID;
-    } else if(protocol.equals(DNProtocol.class.getName())) {
-      return DNProtocol.versionID;
-    } else {
-      return proceed(protocol, clientVersion);
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/namenode/NameNodeAspect.aj b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/namenode/NameNodeAspect.aj
deleted file mode 100644
index 068382d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/namenode/NameNodeAspect.aj
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.test.system.NNProtocol;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.system.DaemonProtocol;
-
-public privileged aspect NameNodeAspect {
-  declare parents : NameNode implements NNProtocol;
-
-  // The NameNode doesn't store a copy of its configuration
-  // because it can be changed through the life cycle of the object.
-  // So an exposed reference needs to be added and updated after
-  // new NameNode(Configuration conf) is complete.
-  Configuration NameNode.configRef = null;
-
-  // This method simply assigns a reference to the NameNode configuration object
-  void NameNode.setRef (Configuration conf) {
-    if (configRef == null)
-      configRef = conf;
-  }
-
-  public Configuration NameNode.getDaemonConf() {
-    return configRef;
-  }
-
-  pointcut nnConstructorPointcut(Configuration conf) :
-    call(NameNode.new(Configuration)) && args(conf);
-
-  after(Configuration conf) returning (NameNode namenode):
-    nnConstructorPointcut(conf) {
-    try {
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      namenode.setUser(ugi.getShortUserName());
-    } catch (IOException e) {
-      namenode.LOG.warn("Unable to get the user information for the " +
-          "NameNode");
-    }
-    namenode.setRef(conf);
-    namenode.setReady(true);
-  }
-
-  pointcut getVersionAspect(String protocol, long clientVersion) :
-    execution(public long NameNode.getProtocolVersion(String ,
-      long) throws IOException) && args(protocol, clientVersion);
-
-  long around(String protocol, long clientVersion) :
-    getVersionAspect(protocol, clientVersion) {
-    if(protocol.equals(DaemonProtocol.class.getName())) {
-      return DaemonProtocol.versionID;
-    } else if(protocol.equals(NNProtocol.class.getName())) {
-      return NNProtocol.versionID;
-    } else {
-      return proceed(protocol, clientVersion);
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/conf/system-test-hdfs.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/conf/system-test-hdfs.xml
deleted file mode 100644
index 4e54062..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/conf/system-test-hdfs.xml
+++ /dev/null
@@ -1,147 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-<!-- Mandatory properties that are to be set and uncommented before running the tests -->
-
-<property>
-  <name>test.system.hdrc.hadoophome</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/share/hadoop-current</value>
-  <description> This is the path to the home directory of the hadoop deployment.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.hadoopconfdir</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop</value>
-  <description> This is the path to the configuration directory of the hadoop
-  cluster that is deployed.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.dn.hostfile</name>
-  <value>slaves.localcopy.txt</value>
-  <description> File name containing the hostnames where the DataNodes are running.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdfs.clusterprocess.impl.class</name>
-  <value>org.apache.hadoop.hdfs.test.system.HDFSCluster$HDFSProcessManager</value>
-  <description>
-  Cluster process manager for the HDFS subsystem of the cluster. The value
-  org.apache.hadoop.hdfs.test.system.HDFSCluster$MultiUserHDFSProcessManager can
-  be used to enable multi user support.
-  </description>
-</property>
-
-<property>
-   <name>test.system.hdrc.deployed.scripts.dir</name>
-   <value>./src/test/system/scripts</value>
-   <description>
-     This directory hosts the scripts in the deployed location where
-     the system test client runs.
-   </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.hadoopnewconfdir</name>
-  <value>$(TO_DO_GLOBAL_TMP_DIR)/newconf</value>
-  <description>
-  The directory on all the clusters to which the new config files
-  will be copied.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.suspend.cmd</name>
-  <value>kill -SIGSTOP</value>
-  <description>
-    Command for suspending the given process.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.resume.cmd</name>
-  <value>kill -SIGCONT</value>
-  <description>
-  Command for resuming the given suspended process.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.hadoop.local.confdir</name>
-  <value>$(TO_DO_GLOBAL_TMP_DIR)/localconf</value>
-  <description>
-    A local directory where a new config file is placed before
-    being pushed into new config location on the cluster.
-  </description>
-</property>
-
-<!-- Mandatory keys to be set for the multi user support to be enabled.  -->
-
-<property>
-  <name>test.system.hdfs.clusterprocess.impl.class</name>
-  <value>org.apache.hadoop.hdfs.test.system.HDFSCluster$MultiUserHDFSProcessManager</value>
-  <description>
-    Enables the multi-user cluster process manager.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.multi-user.list.path</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop/proxyusers</value>
-  <description>
-  Multi user list for creating the proxy users.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.multi-user.binary.path</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop/runAs</value>
-  <description>
-    Local file system path on the gateway to the cluster-controller binary, including the binary name.
-    To build the binary, the following commands need to be executed:
-     % ant run-as -Drun-as.hadoop.home.dir=(HADOOP_PREFIX of setup cluster)
-     % cp build-fi/system/c++-build/runAs test.system.hdrc.multi-user.binary.path
-    The location of this binary is an important security precaution.
-    The binary should be owned by root, and the test user group permission should be
-    set so that the binary can be executed by that group. Example usage would be:
-     % sudo chown root binary
-     % sudo chmod 6511 binary
-    Change the permissions appropriately to make it more secure.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.multi-user.managinguser.namenode</name>
-  <value>*</value>
-  <description>
-    The user that manages the particular daemon. Please note that this user should also be
-    present on the gateways. An example configuration for the above would be
-    key name = test.system.hdrc.multi-user.managinguser.namenode
-    key value = guest
-    Please note the daemon names are all lower case, corresponding to the hadoop-daemon.sh command.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.multi-user.managinguser.datanode</name>
-  <value>*</value>
-</property>
- 
-</configuration>
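A minimal sketch of how a system test might read the mandatory keys defined above once the $(TO_DO_*) placeholders have been filled in. The key names come from this file; the resource name on the classpath and the printed labels are illustrative only:

    import org.apache.hadoop.conf.Configuration;

    public class SystemTestConfReader {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.addResource("system-test-hdfs.xml");   // assumes the file is on the classpath

        String hadoopHome = conf.get("test.system.hdrc.hadoophome");
        String dnHostFile = conf.get("test.system.hdrc.dn.hostfile", "slaves.localcopy.txt");
        String mgrClass   = conf.get("test.system.hdfs.clusterprocess.impl.class");

        System.out.println("hadoop home  : " + hadoopHome);
        System.out.println("DN host file : " + dnHostFile);
        System.out.println("process mgr  : " + mgrClass);
      }
    }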
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNClient.java
deleted file mode 100644
index 2376892..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNClient.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-/**
- * Datanode client for system tests. The class assumes that the configuration
- * key {@code DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY} is set; only the port
- * portion of that address is used.
- */
-public class DNClient extends HDFSDaemonClient<DNProtocol> {
-
-  DNProtocol proxy;
-  private static final String HADOOP_DATANODE_OPTS_ENV = "HADOOP_DATANODE_OPTS";
-
-  public DNClient(Configuration conf, RemoteProcess process) throws IOException {
-    super(conf, process);
-  }
-
-  @Override
-  public void connect() throws IOException {
-    if (isConnected()) {
-      return;
-    }
-    String sockAddrStr = getConf().get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY);
-    if (sockAddrStr == null) {
-      throw new IllegalArgumentException("Datanode IPC address is not set. "
-          + "Check if " + DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY
-          + " is configured.");
-    }
-    String[] splits = sockAddrStr.split(":");
-    if (splits.length != 2) {
-      throw new IllegalArgumentException(
-          "Datanode IPC address is not correctly configured");
-    }
-    String port = splits[1];
-    String sockAddr = getHostName() + ":" + port;
-    InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr);
-    proxy = (DNProtocol) RPC.getProxy(DNProtocol.class, DNProtocol.versionID,
-        bindAddr, getConf());
-    setConnected(true);
-  }
-
-  @Override
-  public void disconnect() throws IOException {
-    RPC.stopProxy(proxy);
-    setConnected(false);
-  }
-
-  @Override
-  protected DNProtocol getProxy() {
-    return proxy;
-  }
-
-  public Configuration getDatanodeConfig() throws IOException {
-    return getProxy().getDaemonConf();
-  }
-
-  @Override
-  public String getHadoopOptsEnvName() {
-    return HADOOP_DATANODE_OPTS_ENV;
-  }
-
-  /**
-   * Concrete implementation of abstract super class method
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the given attribute
-   * @throws IOException is thrown in case of communication errors
-   */
-  @Override
-  public Object getDaemonAttribute (String attributeName) throws IOException {
-    return getJmxAttribute("DataNode", "DataNodeInfo", attributeName);
-  }
-}
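The connect() method above keeps only the port of the configured IPC address and combines it with the daemon's host name before building the RPC proxy. A minimal sketch of that address handling; the class and method names are illustrative, not part of the original code:

    public final class IpcAddressUtil {
      /** Rebinds a configured "host:port" IPC address to the daemon's actual host. */
      public static String rebind(String configuredAddress, String daemonHost) {
        String[] parts = configuredAddress.split(":");
        if (parts.length != 2) {
          throw new IllegalArgumentException(
              "IPC address is not correctly configured: " + configuredAddress);
        }
        return daemonHost + ":" + parts[1];   // keep the port, replace the host
      }
    }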
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNProtocol.java
deleted file mode 100644
index 31bdd7f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNProtocol.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.test.system.DaemonProtocol;
-
-/**
- * Client side API exposed from Datanode.
- * Actual implementations are likely to be injected
- *
- * The protocol has to be annotated so KerberosInfo can be filled in during
- * creation of an ipc.Client connection.
- */
-@KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
-public interface DNProtocol extends DaemonProtocol {
-  public static final long versionID = 1L;
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSCluster.java
deleted file mode 100644
index d9504f8..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSCluster.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.test.system.AbstractDaemonClient;
-import org.apache.hadoop.test.system.AbstractDaemonCluster;
-import org.apache.hadoop.test.system.process.ClusterProcessManager;
-import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;
-import org.apache.hadoop.test.system.process.MultiUserHadoopDaemonRemoteCluster;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
-
-public class HDFSCluster extends AbstractDaemonCluster {
-
-  static {
-    Configuration.addDefaultResource("hdfs-site.xml");
-  }
-
-  private static final Log LOG = LogFactory.getLog(HDFSCluster.class);
-  public static final String CLUSTER_PROCESS_MGR_IMPL =
-    "test.system.hdfs.clusterprocess.impl.class";
-
-  private HDFSCluster(Configuration conf, ClusterProcessManager rCluster)
-    throws IOException {
-    super(conf, rCluster);
-  }
-
-  /**
-   * Key used to point to the file containing the hostnames of the DataNodes.
-   */
-  public static final String CONF_HADOOP_DN_HOSTFILE_NAME =
-    "test.system.hdrc.dn.hostfile";
-
-  private static List<HadoopDaemonInfo> hdfsDaemonInfos;
-
-  private static String nnHostName;
-  private static String DN_hostFileName;
-
-  protected enum Role {NN, DN}
-
-  @Override
-  protected AbstractDaemonClient
-    createClient(RemoteProcess process) throws IOException {
-    Enum<?> pRole = process.getRole();
-    if (Role.NN.equals(pRole)) {
-      return createNNClient(process);
-    } else if (Role.DN.equals(pRole)) {
-      return createDNClient(process);
-    } else throw new IOException("Role " + pRole +
-      " is not supported by HDFSCluster");
-  }
-
-  protected DNClient createDNClient(RemoteProcess dnDaemon) throws IOException {
-    return new DNClient(getConf(), dnDaemon);
-  }
-
-  protected NNClient createNNClient(RemoteProcess nnDaemon) throws IOException {
-    return new NNClient(getConf(), nnDaemon);
-  }
-
-  public NNClient getNNClient () {
-    Iterator<AbstractDaemonClient> iter = getDaemons().get(Role.NN).iterator();
-    return (NNClient) iter.next();
-  }
-
-  public List<DNClient> getDNClients () {
-    return (List) getDaemons().get(Role.DN);
-  }
-
-  public DNClient getDNClient (String hostname) {
-    for (DNClient dnC : getDNClients()) {
-      if (dnC.getHostName().equals(hostname))
-        return dnC;
-    }
-    return null;
-  }
-
-  public static class HDFSProcessManager extends HadoopDaemonRemoteCluster {
-    public HDFSProcessManager() {
-      super(hdfsDaemonInfos);
-    }
-  }
-
-  public static class MultiUserHDFSProcessManager
-      extends MultiUserHadoopDaemonRemoteCluster {
-    public MultiUserHDFSProcessManager() {
-      super(hdfsDaemonInfos);
-    }
-  }
-
-
-  public static HDFSCluster createCluster(Configuration conf) throws Exception {
-    conf.addResource("system-test.xml");
-    String sockAddrStr = FileSystem.getDefaultUri(conf).getAuthority();
-    if (sockAddrStr == null) {
-      throw new IllegalArgumentException("Namenode IPC address is not set");
-    }
-    String[] splits = sockAddrStr.split(":");
-    if (splits.length != 2) {
-      throw new IllegalArgumentException(
-          "Namenode IPC address is not correctly configured");
-    }
-    nnHostName = splits[0];
-    DN_hostFileName = conf.get(CONF_HADOOP_DN_HOSTFILE_NAME, "slaves");
-
-    hdfsDaemonInfos = new ArrayList<HadoopDaemonInfo>();
-    hdfsDaemonInfos.add(new HadoopDaemonInfo("namenode", 
-        Role.NN, Arrays.asList(new String[]{nnHostName})));
-    hdfsDaemonInfos.add(new HadoopDaemonInfo("datanode", 
-        Role.DN, DN_hostFileName));
-    
-    String implKlass = conf.get(CLUSTER_PROCESS_MGR_IMPL);
-    if (implKlass == null || implKlass.isEmpty()) {
-      implKlass = HDFSCluster.HDFSProcessManager.class.getName();
-    }
-    Class<ClusterProcessManager> klass =
-      (Class<ClusterProcessManager>) Class.forName(implKlass);
-    ClusterProcessManager clusterProcessMgr = klass.newInstance();
-    LOG.info("Created ClusterProcessManager as " + implKlass);
-    clusterProcessMgr.init(conf);
-    return new HDFSCluster(conf, clusterProcessMgr);
-  }
-}
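createCluster() above selects the ClusterProcessManager implementation reflectively from the test.system.hdfs.clusterprocess.impl.class key. A simplified sketch of that plug-in pattern, assuming only a public no-arg constructor and an init(Configuration) method; the nested Manager interface is a stand-in for the real one and error handling is trimmed:

    import org.apache.hadoop.conf.Configuration;

    public final class PluggableManagerFactory {

      /** Minimal stand-in for the real ClusterProcessManager interface. */
      public interface Manager {
        void init(Configuration conf);
      }

      public static Manager create(Configuration conf, String key, String defaultImpl)
          throws Exception {
        String implKlass = conf.get(key);
        if (implKlass == null || implKlass.isEmpty()) {
          implKlass = defaultImpl;                       // fall back to the single-user manager
        }
        @SuppressWarnings("unchecked")
        Class<? extends Manager> klass = (Class<? extends Manager>) Class.forName(implKlass);
        Manager mgr = klass.newInstance();               // requires a public no-arg constructor
        mgr.init(conf);
        return mgr;
      }
    }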
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java
deleted file mode 100644
index 4316b36..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.test.system.AbstractDaemonClient;
-import org.apache.hadoop.test.system.DaemonProtocol;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-public abstract class HDFSDaemonClient<PROXY extends DaemonProtocol>
-  extends AbstractDaemonClient<PROXY> {
-
-  public HDFSDaemonClient(Configuration conf, RemoteProcess process)
-      throws IOException {
-    super(conf, process);
-  }
-
-  public String[] getHDFSDataDirs() throws IOException {
-    return getProxy().getDaemonConf().getStrings(
-        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
-  }
-
-  public String getHDFSNameDirs() throws IOException {
-    return getProxy().getDaemonConf().getStrings(
-        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY)[0];
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNClient.java
deleted file mode 100644
index 79be0e1..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNClient.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-public class NNClient extends HDFSDaemonClient<NNProtocol> {
-  
-  NNProtocol proxy;
-  private static final String HADOOP_NAMENODE_OPTS_ENV = "HADOOP_NAMENODE_OPTS";
-
-  public NNClient(Configuration conf, RemoteProcess process) throws IOException {
-    super(conf, process);
-  }
-
-  @Override
-  public void connect() throws IOException {
-    if (isConnected())
-      return;
-    String sockAddrStr = FileSystem.getDefaultUri(getConf()).getAuthority();
-    if (sockAddrStr == null) {
-      throw new IllegalArgumentException("Namenode IPC address is not set");
-    }
-    String[] splits = sockAddrStr.split(":");
-    if (splits.length != 2) {
-      throw new IllegalArgumentException(
-          "Namenode IPC address is not correctly configured");
-    }
-    String port = splits[1];
-    String sockAddr = getHostName() + ":" + port;
-
-    InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr);
-    proxy = (NNProtocol) RPC.getProxy(NNProtocol.class, NNProtocol.versionID,
-        bindAddr, getConf());
-    setConnected(true);
-  }
-
-  @Override
-  public void disconnect() throws IOException {
-    RPC.stopProxy(proxy);
-    setConnected(false);
-  }
-
-  @Override
-  protected NNProtocol getProxy() {
-    return proxy;
-  }
-
-  @Override
-  public String getHadoopOptsEnvName() {
-    return HADOOP_NAMENODE_OPTS_ENV;
-  }
-
-  /**
-   * Concrete implementation of abstract super class method
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the given attribute
-   * @throws IOException is thrown in case of communication errors
-   */
-  @Override
-  public Object getDaemonAttribute (String attributeName) throws IOException {
-    return getJmxAttribute("NameNode", "NameNodeInfo", attributeName);
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNProtocol.java
deleted file mode 100644
index 2665d23..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNProtocol.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.test.system.DaemonProtocol;
-
-/**
- * Client side API exposed from Namenode.
- * Actual implementations are likely to be injected
- *
- * The protocol has to be annotated so KerberosInfo can be filled in during
- * creation of an ipc.Client connection.
- */
-@KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
-public interface NNProtocol extends DaemonProtocol {
-  public static final long versionID = 1L;
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/test/org/apache/hadoop/hdfs/TestHL040.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/test/org/apache/hadoop/hdfs/TestHL040.java
deleted file mode 100644
index 43dcae5..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/test/org/apache/hadoop/hdfs/TestHL040.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.test.system.DNClient;
-import org.apache.hadoop.hdfs.test.system.HDFSCluster;
-import org.apache.hadoop.hdfs.test.system.NNClient;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mortbay.util.ajax.JSON;
-
-public class TestHL040 {
-  private HDFSCluster cluster = null;
-  private static final Log LOG = LogFactory.getLog(TestHL040.class);
-
-  public TestHL040() throws Exception {
-  }
-
-  @Before
-  public void setupUp() throws Exception {
-    cluster = HDFSCluster.createCluster(new Configuration());
-    cluster.setUp();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    cluster.tearDown();
-  }
-
-  @Test
-  public void testConnect() throws IOException {
-    LOG.info("Starting TestHL040: connecting to the HDFSCluster");
-    LOG.info("================ Getting namenode info ================");
-    NNClient dfsMaster = cluster.getNNClient();
-    LOG.info("Process info of namenode " + dfsMaster.getHostName() + " is: " +
-        dfsMaster.getProcessInfo());
-    LOG.info("================ Getting datanode info ================");
-    Collection<DNClient> clients = cluster.getDNClients();
-    for (DNClient dnC : clients) {
-      LOG.info("Process info of datanode " + dnC.getHostName() + " is: " +
-          dnC.getProcessInfo());
-      Assert.assertNotNull("Datanode process info isn't supposed to be null",
-          dnC.getProcessInfo());
-      LOG.info("Free space " + getFreeSpace(dnC));
-    }
-  }
-
-  private long getFreeSpace(DNClient dnC) throws IOException {
-    Object volObj = dnC.getDaemonAttribute("VolumeInfo");
-    Assert.assertNotNull("Attribute value is expected to be not null", volObj);
-    LOG.debug("Got object: " + volObj);
-    Map volInfoMap = (Map) JSON.parse(volObj.toString());
-    long totalFreeSpace = 0L;
-    for (Object key : volInfoMap.keySet()) {
-      Map attrMap = (Map) volInfoMap.get(key);
-      long freeSpace = (Long) attrMap.get("freeSpace");
-      totalFreeSpace += freeSpace;
-    }
-    return totalFreeSpace;
-  }
-}
diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml
index ea77591..0e2684b 100644
--- a/hadoop-hdfs-project/pom.xml
+++ b/hadoop-hdfs-project/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 7b8e045..56a036f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -52,6 +52,12 @@
 
     HADOOP-8285 MR changes for Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
 
+    MAPREDUCE-3302. Remove the last dependency call from
+    org.apache.hadoop.record package in MR. (harsh)
+
+    MAPREDUCE-2384. The job submitter should make sure to validate
+    jobs before creation of necessary files. (harsh)
+
   BUG FIXES
 
     MAPREDUCE-4100. [Gridmix] Bug fixed in compression emulation feature for 
@@ -109,7 +115,58 @@
     MAPREDUCE-1740. NPE in getMatchingLevelForNodes when node locations are 
     variable depth (ahmed via tucu) [IMPORTANT: this is dead code in trunk]
 
-Release 2.0.0 - UNRELEASED
+    MAPREDUCE-3990. MRBench allows Long-sized input-lines value
+    but parses CLI argument as an Integer. (harsh)
+
+Release 2.0.1-alpha - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    MAPREDUCE-4146. Support limits on task status string length and number of
+    block locations in branch-2. (Ahmed Radwan via tomwhite)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    MAPREDUCE-4148. MapReduce should not have a compile-time dependency on
+    HDFS. (tomwhite)
+
+    MAPREDUCE-4250. hadoop-config.sh missing variable exports, causes Yarn 
+    jobs to fail with ClassNotFoundException MRAppMaster. (phunt via tucu)
+
+    MAPREDUCE-4002. MultiFileWordCount job fails if the input path is not
+    from default file system. (Bhallamudi Venkata Siva Kamesh via todd)
+
+    MAPREDUCE-4274 MapOutputBuffer should use native byte order for kvmeta.
+    (todd via bobby)
+
+    MAPREDUCE-4262. NM gives wrong log message saying "Connected to 
+    ResourceManager" before trying to connect. (Devaraj K via tgraves)
+
+    MAPREDUCE-4276. Allow setting yarn.nodemanager.delete.debug-delay-sec 
+    property to "-1" for easier container debugging. (ahmed via tucu)
+ 
+    MAPREDUCE-4224. TestFifoScheduler throws 
+    org.apache.hadoop.metrics2.MetricsException (Devaraj K via tgraves)
+
+    MAPREDUCE-3493. Add the default mapreduce.shuffle.port property
+    to mapred-default.xml (Madhukara Phatak via harsh)
+
+    MAPREDUCE-4307. TeraInputFormat calls FileSystem.getDefaultBlockSize()
+    without a Path - Failure when using ViewFileSystem. (Ahmed Radwan via eli)
+
+    MAPREDUCE-4313. TestTokenCache doesn't compile due to a
+    TokenCache.getDelegationToken compilation error (bobby)
+
+    MAPREDUCE-3873. Fixed NodeManagers' decommissioning at RM to accept IP
+    addresses also. (xieguiming via vinodkv)
+
+Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES
 
@@ -326,6 +383,9 @@
 
     MAPREDUCE-4162. Correctly set token service (Daryn Sharp via bobby)
 
+    MAPREDUCE-4301. Dedupe some strings in MRAM for memory savings 
+    (bobby via tgraves)
+
   OPTIMIZATIONS
 
     MAPREDUCE-3850. Avoid redundant calls for tokens in TokenCache (Daryn
@@ -478,6 +538,36 @@
     MAPREDUCE-4237. TestNodeStatusUpdater can fail if localhost has a domain
     associated with it (bobby)
 
+    MAPREDUCE-4233. NPE can happen in RMNMNodeInfo. (bobby)
+
+    MAPREDUCE-4238. mavenize data_join. (tgraves)
+
+    MAPREDUCE-4102. job counters not available in Jobhistory webui for 
+    killed jobs (Bhallamudi Venkata Siva Kamesh via tgraves)
+
+    MAPREDUCE-3543. Mavenize Gridmix. (tgraves)
+
+    MAPREDUCE-4197. Include the hsqldb jar in the hadoop-mapreduce tar 
+    file (Ravi Prakash via tgraves)
+
+    MAPREDUCE-4269. documentation: Gridmix has javadoc warnings in 
+    StressJobFactory (Jonathon Eagles via tgraves).
+
+    MAPREDUCE-3870. Invalid App Metrics 
+    (Bhallamudi Venkata Siva Kamesh via tgraves).
+
+    MAPREDUCE-4152. map task left hanging after AM dies trying to connect to RM
+    (Tom Graves via bobby)
+
+    MAPREDUCE-4297. Usersmap file in gridmix should not fail on empty lines
+    (Ravi Prakash via bobby)
+
+    MAPREDUCE-4302. NM goes down if error encountered during log aggregation 
+    (Daryn Sharp via bobby)
+
+    MAPREDUCE-3350. Per-app RM page should have the list of application-attempts
+    like on the app JHS page (Jonathon Eagles via tgraves)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
index 4cd6eb1..ed2eef0 100644
--- a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
+++ b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
@@ -94,6 +94,7 @@
 export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA}
 log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
 pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
+YARN_STOP_TIMEOUT=${YARN_STOP_TIMEOUT:-5}
 
 # Set default scheduling priority
 if [ "$YARN_NICENESS" = "" ]; then
@@ -129,9 +130,15 @@
   (stop)
 
     if [ -f $pid ]; then
-      if kill -0 `cat $pid` > /dev/null 2>&1; then
+      TARGET_PID=`cat $pid`
+      if kill -0 $TARGET_PID > /dev/null 2>&1; then
         echo stopping $command
-        kill `cat $pid`
+        kill $TARGET_PID
+        sleep $YARN_STOP_TIMEOUT
+        if kill -0 $TARGET_PID > /dev/null 2>&1; then
+          echo "$command did not stop gracefully after $YARN_STOP_TIMEOUT seconds: killing with kill -9"
+          kill -9 $TARGET_PID
+        fi
       else
         echo no $command to stop
       fi
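The stop case above now sends a plain kill, waits YARN_STOP_TIMEOUT seconds (default 5), and only then escalates to kill -9. A sketch of the same stop-with-timeout idea applied to a child java.lang.Process, assuming Java 8+ for waitFor with a timeout; not part of the patch, purely illustrative:

    import java.util.concurrent.TimeUnit;

    public final class GracefulStop {
      /** Asks the process to stop, then force-kills it if it is still alive after the timeout. */
      public static void stop(Process process, long timeoutSeconds) throws InterruptedException {
        process.destroy();                                        // polite stop, like plain kill
        if (!process.waitFor(timeoutSeconds, TimeUnit.SECONDS)) {
          process.destroyForcibly();                              // escalation, like kill -9
        }
      }
    }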
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
index 1bee726..c16df60 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 7ac334c..cafff92 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -253,6 +253,10 @@
      .addTransition(TaskAttemptState.RUNNING,
          TaskAttemptState.FAIL_CONTAINER_CLEANUP,
          TaskAttemptEventType.TA_TIMED_OUT, CLEANUP_CONTAINER_TRANSITION)
+     // if container killed by AM shutting down
+     .addTransition(TaskAttemptState.RUNNING,
+         TaskAttemptState.KILLED,
+         TaskAttemptEventType.TA_CONTAINER_CLEANED, new KilledTransition())
      // Kill handling
      .addTransition(TaskAttemptState.RUNNING,
          TaskAttemptState.KILL_CONTAINER_CLEANUP, TaskAttemptEventType.TA_KILL,
@@ -272,6 +276,10 @@
      .addTransition(TaskAttemptState.COMMIT_PENDING,
          TaskAttemptState.KILL_CONTAINER_CLEANUP, TaskAttemptEventType.TA_KILL,
          CLEANUP_CONTAINER_TRANSITION)
+     // if container killed by AM shutting down
+     .addTransition(TaskAttemptState.COMMIT_PENDING,
+         TaskAttemptState.KILLED,
+         TaskAttemptEventType.TA_CONTAINER_CLEANED, new KilledTransition())
      .addTransition(TaskAttemptState.COMMIT_PENDING,
          TaskAttemptState.FAIL_CONTAINER_CLEANUP,
          TaskAttemptEventType.TA_FAILMSG, CLEANUP_CONTAINER_TRANSITION)
@@ -363,6 +371,7 @@
              TaskAttemptEventType.TA_COMMIT_PENDING,
              TaskAttemptEventType.TA_DONE,
              TaskAttemptEventType.TA_FAILMSG,
+             TaskAttemptEventType.TA_CONTAINER_CLEANED,
              // Container launch events can arrive late
              TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
              TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED))
@@ -384,6 +393,7 @@
              TaskAttemptEventType.TA_COMMIT_PENDING,
              TaskAttemptEventType.TA_DONE,
              TaskAttemptEventType.TA_FAILMSG,
+             TaskAttemptEventType.TA_CONTAINER_CLEANED,
              // Container launch events can arrive late
              TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
              TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED))
@@ -402,6 +412,7 @@
          TaskAttemptState.SUCCEEDED,
          EnumSet.of(TaskAttemptEventType.TA_KILL,
              TaskAttemptEventType.TA_FAILMSG,
+             TaskAttemptEventType.TA_CONTAINER_CLEANED,
              TaskAttemptEventType.TA_CONTAINER_COMPLETED))
 
      // Transitions from FAILED state
@@ -417,6 +428,7 @@
              // Container launch events can arrive late
              TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
              TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED,
+             TaskAttemptEventType.TA_CONTAINER_CLEANED,
              TaskAttemptEventType.TA_COMMIT_PENDING,
              TaskAttemptEventType.TA_DONE,
              TaskAttemptEventType.TA_FAILMSG))
@@ -434,6 +446,7 @@
              // Container launch events can arrive late
              TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
              TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED,
+             TaskAttemptEventType.TA_CONTAINER_CLEANED,
              TaskAttemptEventType.TA_COMMIT_PENDING,
              TaskAttemptEventType.TA_DONE,
              TaskAttemptEventType.TA_FAILMSG))
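The new transitions above make TA_CONTAINER_CLEANED kill a RUNNING or COMMIT_PENDING attempt, while terminal states simply swallow the late event so it can never trigger an InternalError. An illustrative reduction of that rule; the state names mirror TaskAttemptState, the rest is a sketch rather than the real state machine:

    public final class ContainerCleanedRule {
      enum State { RUNNING, COMMIT_PENDING, SUCCEEDED, FAILED, KILLED }

      /** Next state when TA_CONTAINER_CLEANED arrives in the given state. */
      static State onContainerCleaned(State current) {
        switch (current) {
          case RUNNING:
          case COMMIT_PENDING:
            return State.KILLED;   // the container was torn down underneath the attempt
          default:
            return current;        // terminal states ignore the late event
        }
      }
    }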
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
index 44dd16d..3144ab1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
@@ -82,10 +82,12 @@
       new LinkedBlockingQueue<ContainerLauncherEvent>();
   YarnRPC rpc;
 
-  private Container getContainer(ContainerId id) {
+  private Container getContainer(ContainerLauncherEvent event) {
+    ContainerId id = event.getContainerID();
     Container c = containers.get(id);
     if(c == null) {
-      c = new Container();
+      c = new Container(event.getTaskAttemptID(), event.getContainerID(),
+          event.getContainerMgrAddress(), event.getContainerToken());
       Container old = containers.putIfAbsent(id, c);
       if(old != null) {
         c = old;
@@ -107,9 +109,19 @@
 
   private class Container {
     private ContainerState state;
+    // store enough information to be able to cleanup the container
+    private TaskAttemptId taskAttemptID;
+    private ContainerId containerID;
+    final private String containerMgrAddress;
+    private ContainerToken containerToken;
     
-    public Container() {
+    public Container(TaskAttemptId taId, ContainerId containerID,
+        String containerMgrAddress, ContainerToken containerToken) {
       this.state = ContainerState.PREP;
+      this.taskAttemptID = taId;
+      this.containerMgrAddress = containerMgrAddress;
+      this.containerID = containerID;
+      this.containerToken = containerToken;
     }
     
     public synchronized boolean isCompletelyDone() {
@@ -118,7 +130,6 @@
     
     @SuppressWarnings("unchecked")
     public synchronized void launch(ContainerRemoteLaunchEvent event) {
-      TaskAttemptId taskAttemptID = event.getTaskAttemptID();
       LOG.info("Launching " + taskAttemptID);
       if(this.state == ContainerState.KILLED_BEFORE_LAUNCH) {
         state = ContainerState.DONE;
@@ -127,15 +138,10 @@
         return;
       }
       
-
-      final String containerManagerBindAddr = event.getContainerMgrAddress();
-      ContainerId containerID = event.getContainerID();
-      ContainerToken containerToken = event.getContainerToken();
-
       ContainerManager proxy = null;
       try {
 
-        proxy = getCMProxy(containerID, containerManagerBindAddr,
+        proxy = getCMProxy(containerID, containerMgrAddress,
             containerToken);
 
         // Construct the actual Container
@@ -181,35 +187,35 @@
     }
     
     @SuppressWarnings("unchecked")
-    public synchronized void kill(ContainerLauncherEvent event) {
+    public synchronized void kill() {
+
+      if(isCompletelyDone()) { 
+        return;
+      }
       if(this.state == ContainerState.PREP) {
         this.state = ContainerState.KILLED_BEFORE_LAUNCH;
       } else {
-        final String containerManagerBindAddr = event.getContainerMgrAddress();
-        ContainerId containerID = event.getContainerID();
-        ContainerToken containerToken = event.getContainerToken();
-        TaskAttemptId taskAttemptID = event.getTaskAttemptID();
         LOG.info("KILLING " + taskAttemptID);
 
         ContainerManager proxy = null;
         try {
-          proxy = getCMProxy(containerID, containerManagerBindAddr,
-              containerToken);
+          proxy = getCMProxy(this.containerID, this.containerMgrAddress,
+              this.containerToken);
 
             // kill the remote container if already launched
             StopContainerRequest stopRequest = Records
               .newRecord(StopContainerRequest.class);
-            stopRequest.setContainerId(event.getContainerID());
+            stopRequest.setContainerId(this.containerID);
             proxy.stopContainer(stopRequest);
 
         } catch (Throwable t) {
 
           // ignore the cleanup failure
           String message = "cleanup failed for container "
-            + event.getContainerID() + " : "
+            + this.containerID + " : "
             + StringUtils.stringifyException(t);
           context.getEventHandler().handle(
-            new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID, message));
+            new TaskAttemptDiagnosticsUpdateEvent(this.taskAttemptID, message));
           LOG.warn(message);
         } finally {
           if (proxy != null) {
@@ -220,10 +226,11 @@
       }
       // after killing, send killed event to task attempt
       context.getEventHandler().handle(
-          new TaskAttemptEvent(event.getTaskAttemptID(),
+          new TaskAttemptEvent(this.taskAttemptID,
               TaskAttemptEventType.TA_CONTAINER_CLEANED));
     }
   }
+
   // To track numNodes.
   Set<String> allNodes = new HashSet<String>();
 
@@ -308,7 +315,17 @@
     super.start();
   }
 
+  private void shutdownAllContainers() {
+    for (Container ct : this.containers.values()) {
+      if (ct != null) {
+        ct.kill();
+      }
+    }
+  }
+
   public void stop() {
+    // shutdown any containers that might be left running
+    shutdownAllContainers();
     eventHandlingThread.interrupt();
     launcherPool.shutdownNow();
     super.stop();
@@ -364,7 +381,7 @@
       // TODO: Do it only once per NodeManager.
       ContainerId containerID = event.getContainerID();
 
-      Container c = getContainer(containerID);
+      Container c = getContainer(event);
       switch(event.getType()) {
 
       case CONTAINER_REMOTE_LAUNCH:
@@ -374,7 +391,7 @@
         break;
 
       case CONTAINER_REMOTE_CLEANUP:
-        c.kill(event);
+        c.kill();
         break;
       }
       removeContainerIfDone(containerID);
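Because each Container above now stores its task attempt ID, container ID, manager address, and token, kill() no longer needs an event, and it returns immediately once the container is completely done. That is what makes the shutdown sweep in stop() safe to call at any time. A stripped-down sketch of the idea, with a stand-in record type instead of the real Container:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public final class ShutdownSweep {

      /** Minimal stand-in for the launcher's per-container record. */
      static final class TrackedContainer {
        private boolean done;

        synchronized void kill() {
          if (done) {
            return;            // idempotent: nothing to do for finished containers
          }
          // ... stop the remote container using the stored address and token ...
          done = true;
        }
      }

      private final ConcurrentMap<String, TrackedContainer> containers =
          new ConcurrentHashMap<String, TrackedContainer>();

      /** Mirrors shutdownAllContainers(): sweep every tracked container on stop(). */
      public void shutdownAll() {
        for (TrackedContainer c : containers.values()) {
          if (c != null) {
            c.kill();
          }
        }
      }
    }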
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
index ec02ef5..248713c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
@@ -69,7 +69,7 @@
       return;
     }
     
-    if(total == null || total.getGroupNames() == null) {
+    if(total == null || total.getGroupNames() == null || total.countCounters() == 0) {
       String type = $(TASK_ID);
       if(type == null || type.isEmpty()) {
         type = $(JOB_ID, "the job");
@@ -180,14 +180,25 @@
     // Get all types of counters
     Map<TaskId, Task> tasks = job.getTasks();
     total = job.getAllCounters();
+    boolean needTotalCounters = false;
+    if (total == null) {
+      total = new Counters();
+      needTotalCounters = true;
+    }
     map = new Counters();
     reduce = new Counters();
     for (Task t : tasks.values()) {
       Counters counters = t.getCounters();
+      if (counters == null) {
+        continue;
+      }
       switch (t.getType()) {
         case MAP:     map.incrAllCounters(counters);     break;
         case REDUCE:  reduce.incrAllCounters(counters);  break;
       }
+      if (needTotalCounters) {
+        total.incrAllCounters(counters);
+      }
     }
   }
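The change above synthesizes a job-level total when getAllCounters() returns null and skips tasks whose counters are missing (for example, killed or failed tasks) instead of hitting an NPE. A small sketch of the same null-safe aggregation, using a plain map as a stand-in for the real Counters type:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public final class NullSafeCounterSum {
      /** Sums per-task counters, ignoring tasks that never reported any. */
      public static Map<String, Long> aggregate(List<Map<String, Long>> perTaskCounters) {
        Map<String, Long> total = new HashMap<String, Long>();
        for (Map<String, Long> counters : perTaskCounters) {
          if (counters == null) {
            continue;                                  // task has no counters, e.g. it failed
          }
          for (Map.Entry<String, Long> e : counters.entrySet()) {
            Long current = total.get(e.getKey());
            total.put(e.getKey(), (current == null ? 0L : current) + e.getValue());
          }
        }
        return total;
      }
    }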
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java
index 6dbc918..8d5c469 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java
@@ -81,6 +81,9 @@
     Map<TaskId, Task> tasks = job.getTasks();
     for (Task t : tasks.values()) {
       Counters counters = t.getCounters();
+      if (counters == null) {
+        continue;
+      }
       total.incrAllCounters(counters);
       switch (t.getType()) {
       case MAP:
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
index dd57408..81af358 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapreduce.v2.app;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -131,6 +132,17 @@
     }
     return map;
   }
+  
+  public static Map<JobId, Job> newJobs(ApplicationId appID, int numJobsPerApp,
+      int numTasksPerJob, int numAttemptsPerTask, boolean hasFailedTasks) {
+    Map<JobId, Job> map = Maps.newHashMap();
+    for (int j = 0; j < numJobsPerApp; ++j) {
+      Job job = newJob(appID, j, numTasksPerJob, numAttemptsPerTask, null,
+          hasFailedTasks);
+      map.put(job.getID(), job);
+    }
+    return map;
+  }
 
   public static JobId newJobID(ApplicationId appID, int i) {
     JobId id = Records.newRecord(JobId.class);
@@ -316,16 +328,16 @@
     };
   }
 
-  public static Map<TaskId, Task> newTasks(JobId jid, int n, int m) {
+  public static Map<TaskId, Task> newTasks(JobId jid, int n, int m, boolean hasFailedTasks) {
     Map<TaskId, Task> map = Maps.newHashMap();
     for (int i = 0; i < n; ++i) {
-      Task task = newTask(jid, i, m);
+      Task task = newTask(jid, i, m, hasFailedTasks);
       map.put(task.getID(), task);
     }
     return map;
   }
 
-  public static Task newTask(JobId jid, int i, int m) {
+  public static Task newTask(JobId jid, int i, int m, final boolean hasFailedTasks) {
     final TaskId tid = Records.newRecord(TaskId.class);
     tid.setJobId(jid);
     tid.setId(i);
@@ -345,6 +357,9 @@
 
       @Override
       public Counters getCounters() {
+        if (hasFailedTasks) {
+          return null;
+        }
         return new Counters(
           TypeConverter.fromYarn(report.getCounters()));
       }
@@ -394,8 +409,14 @@
 
   public static Counters getCounters(
       Collection<Task> tasks) {
+    List<Task> completedTasks = new ArrayList<Task>();
+    for (Task task : tasks) {
+      if (task.getCounters() != null) {
+        completedTasks.add(task);
+      }
+    }
     Counters counters = new Counters();
-    return JobImpl.incrTaskCounters(counters, tasks);
+    return JobImpl.incrTaskCounters(counters, completedTasks);
   }
 
   static class TaskCount {
@@ -434,10 +455,15 @@
   }
 
   public static Job newJob(ApplicationId appID, int i, int n, int m, Path confFile) {
+    return newJob(appID, i, n, m, confFile, false);
+  }
+  
+  public static Job newJob(ApplicationId appID, int i, int n, int m,
+      Path confFile, boolean hasFailedTasks) {
     final JobId id = newJobID(appID, i);
     final String name = newJobName();
     final JobReport report = newJobReport(id);
-    final Map<TaskId, Task> tasks = newTasks(id, n, m);
+    final Map<TaskId, Task> tasks = newTasks(id, n, m, hasFailedTasks);
     final TaskCount taskCount = getTaskCount(tasks.values());
     final Counters counters = getCounters(tasks
       .values());
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
index e5ad3fd..94c4f20 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
@@ -72,6 +72,7 @@
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
@@ -450,6 +451,121 @@
     assertFalse(eventHandler.internalError);
   }
   
+  @Test
+  public void testContainerCleanedWhileRunning() throws Exception {
+    ApplicationId appId = BuilderUtils.newApplicationId(1, 2);
+    ApplicationAttemptId appAttemptId =
+      BuilderUtils.newApplicationAttemptId(appId, 0);
+    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
+    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
+    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
+    Path jobFile = mock(Path.class);
+
+    MockEventHandler eventHandler = new MockEventHandler();
+    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
+    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
+
+    JobConf jobConf = new JobConf();
+    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
+    jobConf.setBoolean("fs.file.impl.disable.cache", true);
+    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
+    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
+
+    TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
+    when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
+
+    AppContext appCtx = mock(AppContext.class);
+    ClusterInfo clusterInfo = mock(ClusterInfo.class);
+    Resource resource = mock(Resource.class);
+    when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
+    when(clusterInfo.getMinContainerCapability()).thenReturn(resource);
+    when(resource.getMemory()).thenReturn(1024);
+
+    TaskAttemptImpl taImpl =
+      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
+          splits, jobConf, taListener,
+          mock(OutputCommitter.class), mock(Token.class), new Credentials(),
+          new SystemClock(), appCtx);
+
+    NodeId nid = BuilderUtils.newNodeId("127.0.0.1", 0);
+    ContainerId contId = BuilderUtils.newContainerId(appAttemptId, 3);
+    Container container = mock(Container.class);
+    when(container.getId()).thenReturn(contId);
+    when(container.getNodeId()).thenReturn(nid);
+    when(container.getNodeHttpAddress()).thenReturn("localhost:0");
+
+    taImpl.handle(new TaskAttemptEvent(attemptId,
+        TaskAttemptEventType.TA_SCHEDULE));
+    taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
+        container, mock(Map.class)));
+    taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
+    assertEquals("Task attempt is not in running state", taImpl.getState(),
+        TaskAttemptState.RUNNING);
+    taImpl.handle(new TaskAttemptEvent(attemptId,
+        TaskAttemptEventType.TA_CONTAINER_CLEANED));
+    assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
+        eventHandler.internalError);
+  }
+
+  @Test
+  public void testContainerCleanedWhileCommitting() throws Exception {
+    ApplicationId appId = BuilderUtils.newApplicationId(1, 2);
+    ApplicationAttemptId appAttemptId =
+      BuilderUtils.newApplicationAttemptId(appId, 0);
+    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
+    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
+    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
+    Path jobFile = mock(Path.class);
+
+    MockEventHandler eventHandler = new MockEventHandler();
+    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
+    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
+
+    JobConf jobConf = new JobConf();
+    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
+    jobConf.setBoolean("fs.file.impl.disable.cache", true);
+    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
+    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
+
+    TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
+    when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
+
+    AppContext appCtx = mock(AppContext.class);
+    ClusterInfo clusterInfo = mock(ClusterInfo.class);
+    Resource resource = mock(Resource.class);
+    when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
+    when(clusterInfo.getMinContainerCapability()).thenReturn(resource);
+    when(resource.getMemory()).thenReturn(1024);
+
+    TaskAttemptImpl taImpl =
+      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
+          splits, jobConf, taListener,
+          mock(OutputCommitter.class), mock(Token.class), new Credentials(),
+          new SystemClock(), appCtx);
+
+    NodeId nid = BuilderUtils.newNodeId("127.0.0.1", 0);
+    ContainerId contId = BuilderUtils.newContainerId(appAttemptId, 3);
+    Container container = mock(Container.class);
+    when(container.getId()).thenReturn(contId);
+    when(container.getNodeId()).thenReturn(nid);
+    when(container.getNodeHttpAddress()).thenReturn("localhost:0");
+
+    taImpl.handle(new TaskAttemptEvent(attemptId,
+        TaskAttemptEventType.TA_SCHEDULE));
+    taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
+        container, mock(Map.class)));
+    taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
+    taImpl.handle(new TaskAttemptEvent(attemptId,
+        TaskAttemptEventType.TA_COMMIT_PENDING));
+
+    assertEquals("Task attempt is not in commit pending state", taImpl.getState(),
+        TaskAttemptState.COMMIT_PENDING);
+    taImpl.handle(new TaskAttemptEvent(attemptId,
+        TaskAttemptEventType.TA_CONTAINER_CLEANED));
+    assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
+        eventHandler.internalError);
+  }
+
   public static class MockEventHandler implements EventHandler {
     public boolean internalError;
     
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
index 98e11a8..838daea 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
@@ -220,4 +220,58 @@
       ut.stop();
     }
   }
+
+  @Test
+  public void testMyShutdown() throws Exception {
+    LOG.info("in test Shutdown");
+
+    YarnRPC mockRpc = mock(YarnRPC.class);
+    AppContext mockContext = mock(AppContext.class);
+    @SuppressWarnings("rawtypes")
+    EventHandler mockEventHandler = mock(EventHandler.class);
+    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
+
+    ContainerManager mockCM = mock(ContainerManager.class);
+    when(mockRpc.getProxy(eq(ContainerManager.class),
+        any(InetSocketAddress.class), any(Configuration.class)))
+        .thenReturn(mockCM);
+
+    ContainerLauncherImplUnderTest ut =
+      new ContainerLauncherImplUnderTest(mockContext, mockRpc);
+
+    Configuration conf = new Configuration();
+    ut.init(conf);
+    ut.start();
+    try {
+      ContainerId contId = makeContainerId(0l, 0, 0, 1);
+      TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
+      String cmAddress = "127.0.0.1:8000";
+      StartContainerResponse startResp =
+        recordFactory.newRecordInstance(StartContainerResponse.class);
+      startResp.setServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
+          ShuffleHandler.serializeMetaData(80));
+
+      LOG.info("inserting launch event");
+      ContainerRemoteLaunchEvent mockLaunchEvent =
+        mock(ContainerRemoteLaunchEvent.class);
+      when(mockLaunchEvent.getType())
+        .thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
+      when(mockLaunchEvent.getContainerID())
+        .thenReturn(contId);
+      when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
+      when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
+      when(mockCM.startContainer(any(StartContainerRequest.class))).thenReturn(startResp);
+      ut.handle(mockLaunchEvent);
+
+      ut.waitForPoolToIdle();
+
+      verify(mockCM).startContainer(any(StartContainerRequest.class));
+
+      // skip cleanup and make sure stop kills the container
+
+    } finally {
+      ut.stop();
+      verify(mockCM).stopContainer(any(StopContainerRequest.class));
+    }
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index 1dd4558..14d5302 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 0000000..0975dea
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1 @@
+org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index cfb8ce4..c4177c9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
@@ -37,6 +40,7 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
     </dependency>
   </dependencies>
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
index f83c07e..1232645 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
@@ -25,6 +25,7 @@
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
 import java.nio.IntBuffer;
 import java.util.ArrayList;
 import java.util.List;
@@ -911,7 +912,9 @@
       maxMemUsage -= maxMemUsage % METASIZE;
       kvbuffer = new byte[maxMemUsage];
       bufvoid = kvbuffer.length;
-      kvmeta = ByteBuffer.wrap(kvbuffer).asIntBuffer();
+      kvmeta = ByteBuffer.wrap(kvbuffer)
+         .order(ByteOrder.nativeOrder())
+         .asIntBuffer();
       setEquator(0);
       bufstart = bufend = bufindex = equator;
       kvstart = kvend = kvindex;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
index 06a3d48..f7a7dd4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -53,7 +53,6 @@
 import org.apache.hadoop.io.serializer.Deserializer;
 import org.apache.hadoop.io.serializer.SerializationFactory;
 import org.apache.hadoop.mapred.IFile.Writer;
-import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.FileSystemCounter;
 import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.TaskCounter;
@@ -569,7 +568,21 @@
         resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
     }
   }
-  
+
+  public static String normalizeStatus(String status, Configuration conf) {
+    // Check to see if the status string is too long
+    // and truncate it if needed.
+    int progressStatusLength = conf.getInt(
+        MRConfig.PROGRESS_STATUS_LEN_LIMIT_KEY,
+        MRConfig.PROGRESS_STATUS_LEN_LIMIT_DEFAULT);
+    if (status.length() > progressStatusLength) {
+      LOG.warn("Task status: \"" + status + "\" truncated to max limit ("
+          + progressStatusLength + " characters)");
+      status = status.substring(0, progressStatusLength);
+    }
+    return status;
+  }
+
   @InterfaceAudience.Private
   @InterfaceStability.Unstable
   protected class TaskReporter 
@@ -603,7 +616,7 @@
       return progressFlag.getAndSet(false);
     }
     public void setStatus(String status) {
-      taskProgress.setStatus(status);
+      taskProgress.setStatus(normalizeStatus(status, conf));
       // indicate that progress update needs to be sent
       setProgressFlag();
     }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
index 4038f65..148df50 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
@@ -38,7 +38,6 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.QueueACL;
@@ -433,8 +432,7 @@
       LOG.debug("Printing tokens for job: " + jobId);
       for(Token<?> token: credentials.getAllTokens()) {
         if (token.getKind().toString().equals("HDFS_DELEGATION_TOKEN")) {
-          LOG.debug("Submitting with " +
-              org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier.stringifyToken(token));
+          LOG.debug("Submitting with " + token);
         }
       }
     }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
index 4516cb9..82ee5f0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
@@ -71,4 +71,12 @@
 
   public static final String TASK_LOCAL_OUTPUT_CLASS =
   "mapreduce.task.local.output.class";
+
+  public static final String PROGRESS_STATUS_LEN_LIMIT_KEY =
+    "mapreduce.task.max.status.length";
+  public static final int PROGRESS_STATUS_LEN_LIMIT_DEFAULT = 512;
+
+  public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 10;
+  public static final String MAX_BLOCK_LOCATIONS_KEY =
+    "mapreduce.job.max.split.locations";
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java
index ef9fe2c..c4c07dd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java
@@ -29,6 +29,7 @@
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
 import org.apache.hadoop.record.Utils;
 
@@ -67,7 +68,7 @@
    */
   private static boolean verifyHash(byte[] hash, byte[] msg, SecretKey key) {
     byte[] msg_hash = generateByteHash(msg, key);
-    return Utils.compareBytes(msg_hash, 0, msg_hash.length, hash, 0, hash.length) == 0;
+    return WritableComparator.compareBytes(msg_hash, 0, msg_hash.length, hash, 0, hash.length) == 0;
   }
   
   /**
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
index 9e8c190..1109f3f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
@@ -30,7 +30,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Master;
@@ -179,16 +178,14 @@
    * @param namenode
    * @return delegation token
    */
-  @SuppressWarnings("unchecked")
   @InterfaceAudience.Private
-  public static Token<DelegationTokenIdentifier> getDelegationToken(
+  public static Token<?> getDelegationToken(
       Credentials credentials, String namenode) {
     //No fs specific tokens issues by this fs. It may however issue tokens
     // for other filesystems - which would be keyed by that filesystems name.
     if (namenode == null)  
       return null;
-    return (Token<DelegationTokenIdentifier>) credentials.getToken(new Text(
-        namenode));
+    return (Token<?>) credentials.getToken(new Text(namenode));
   }
 
   /**
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
index e4675b5..9000777 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
@@ -39,7 +39,6 @@
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.util.StringUtils;
 
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java
index b6e44d7..e6ecac5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobSubmissionFiles;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -48,6 +49,7 @@
 
   private static final int splitVersion = JobSplit.META_SPLIT_VERSION;
   private static final byte[] SPLIT_FILE_HEADER;
+
   static {
     try {
       SPLIT_FILE_HEADER = "SPL".getBytes("UTF-8");
@@ -82,7 +84,7 @@
   throws IOException {
     FSDataOutputStream out = createFile(fs, 
         JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
-    SplitMetaInfo[] info = writeOldSplits(splits, out);
+    SplitMetaInfo[] info = writeOldSplits(splits, out, conf);
     out.close();
     writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
         new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
@@ -114,6 +116,8 @@
     if (array.length != 0) {
       SerializationFactory factory = new SerializationFactory(conf);
       int i = 0;
+      int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
+          MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
       long offset = out.getPos();
       for(T split: array) {
         long prevCount = out.getPos();
@@ -123,9 +127,15 @@
         serializer.open(out);
         serializer.serialize(split);
         long currCount = out.getPos();
+        String[] locations = split.getLocations();
+        if (locations.length > maxBlockLocations) {
+          throw new IOException("Max block location exceeded for split: "
+              + split + " splitsize: " + locations.length +
+              " maxsize: " + maxBlockLocations);
+        }
         info[i++] = 
           new JobSplit.SplitMetaInfo( 
-              split.getLocations(), offset,
+              locations, offset,
               split.getLength());
         offset += currCount - prevCount;
       }
@@ -135,18 +145,26 @@
   
   private static SplitMetaInfo[] writeOldSplits(
       org.apache.hadoop.mapred.InputSplit[] splits,
-      FSDataOutputStream out) throws IOException {
+      FSDataOutputStream out, Configuration conf) throws IOException {
     SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
     if (splits.length != 0) {
       int i = 0;
       long offset = out.getPos();
+      int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
+          MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
       for(org.apache.hadoop.mapred.InputSplit split: splits) {
         long prevLen = out.getPos();
         Text.writeString(out, split.getClass().getName());
         split.write(out);
         long currLen = out.getPos();
+        String[] locations = split.getLocations();
+        if (locations.length > maxBlockLocations) {
+          throw new IOException("Max block location exceeded for split: "
+              + split + " splitsize: " + locations.length +
+              " maxsize: " + maxBlockLocations);
+        }
         info[i++] = new JobSplit.SplitMetaInfo( 
-            split.getLocations(), offset,
+            locations, offset,
             split.getLength());
         offset += currLen - prevLen;
       }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java
index 9bba13a..f15dd28 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java
@@ -47,6 +47,7 @@
     long maxMetaInfoSize = conf.getLong(JTConfig.JT_MAX_JOB_SPLIT_METAINFO_SIZE, 
         10000000L);
     Path metaSplitFile = JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir);
+    String jobSplitFile = JobSubmissionFiles.getJobSplitFile(jobSubmitDir).toString();
     FileStatus fStatus = fs.getFileStatus(metaSplitFile);
     if (maxMetaInfoSize > 0 && fStatus.getLen() > maxMetaInfoSize) {
       throw new IOException("Split metadata size exceeded " +
@@ -70,7 +71,7 @@
       JobSplit.SplitMetaInfo splitMetaInfo = new JobSplit.SplitMetaInfo();
       splitMetaInfo.readFields(in);
       JobSplit.TaskSplitIndex splitIndex = new JobSplit.TaskSplitIndex(
-          JobSubmissionFiles.getJobSplitFile(jobSubmitDir).toString(), 
+          jobSplitFile, 
           splitMetaInfo.getStartOffset());
       allSplitMetaInfo[i] = new JobSplit.TaskSplitMetaInfo(splitIndex, 
           splitMetaInfo.getLocations(), 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/TaskAttemptContextImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/TaskAttemptContextImpl.java
index 9b039b0..333f57b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/TaskAttemptContextImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/TaskAttemptContextImpl.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.Task;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.StatusReporter;
@@ -92,8 +93,9 @@
    */
   @Override
   public void setStatus(String status) {
-    setStatusString(status);
-    reporter.setStatus(status);
+    String normalizedStatus = Task.normalizeStatus(status, conf);
+    setStatusString(normalizedStatus);
+    reporter.setStatus(normalizedStatus);
   }
 
   public static class DummyReporter extends StatusReporter {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 0000000..f797a6a
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1,2 @@
+org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier
+org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 34e5a7a..afb76e8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1126,6 +1126,15 @@
   </description>
 </property>
 
+<property>
+  <name>mapreduce.shuffle.port</name>
+  <value>8080</value>
+  <description>Default port that the ShuffleHandler will run on. ShuffleHandler 
+   is a service run at the NodeManager to facilitate transfers of intermediate 
+   Map outputs to requesting Reducers.
+  </description>
+</property>
+
 <!--  Node health script variables -->
 
 <property>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
index 9fa93b4..caf65d7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
index 972b295c..b21218e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
@@ -135,10 +135,11 @@
             th(_TH, "Node").
             th(_TH, "Logs").
             _();
+        boolean odd = false;
           for (AMInfo amInfo : amInfos) {
             AMAttemptInfo attempt = new AMAttemptInfo(amInfo,
                 job.getId(), job.getUserName(), "", "");
-            table.tr().
+            table.tr((odd = !odd) ? _ODD : _EVEN).
               td(String.valueOf(attempt.getAttemptId())).
               td(new Date(attempt.getStartTime()).toString()).
               td().a(".nodelink", url("http://", attempt.getNodeHttpAddress()), 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java
index b8b64a6..74ca32c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java
@@ -43,6 +43,14 @@
         numAttemptsPerTask);
     return split(mocked);
   }
+  
+  public static JobsPair newHistoryJobs(ApplicationId appID, int numJobsPerApp,
+      int numTasksPerJob, int numAttemptsPerTask, boolean hasFailedTasks)
+      throws IOException {
+    Map<JobId, Job> mocked = newJobs(appID, numJobsPerApp, numTasksPerJob,
+        numAttemptsPerTask, hasFailedTasks);
+    return split(mocked);
+  }
 
   private static JobsPair split(Map<JobId, Job> mocked) throws IOException {
     JobsPair ret = new JobsPair();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
index 4ef4d3e..0fb1f75 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
@@ -63,10 +63,16 @@
     final Map<JobId, Job> jobs;
     final long startTime = System.currentTimeMillis();
 
-    TestAppContext(int appid, int numJobs, int numTasks, int numAttempts) {
+    TestAppContext(int appid, int numJobs, int numTasks, int numAttempts,
+        boolean hasFailedTasks) {
       appID = MockJobs.newAppID(appid);
       appAttemptID = MockJobs.newAppAttemptID(appID, 0);
-      jobs = MockJobs.newJobs(appID, numJobs, numTasks, numAttempts);
+      jobs = MockJobs.newJobs(appID, numJobs, numTasks, numAttempts,
+          hasFailedTasks);
+    }
+    
+    TestAppContext(int appid, int numJobs, int numTasks, int numAttempts) {
+      this(appid, numJobs, numTasks, numAttempts, false);
     }
 
     TestAppContext() {
@@ -198,6 +204,14 @@
                          appContext, params);
   }
   
+  @Test public void testJobCounterViewForKilledJob() {
+    LOG.info("JobCounterViewForKilledJob");
+    AppContext appContext = new TestAppContext(0, 1, 1, 1, true);
+    Map<String, String> params = TestAMWebApp.getJobParams(appContext);
+    WebAppTests.testPage(HsCountersPage.class, AppContext.class,
+        appContext, params);
+  }
+  
   @Test public void testSingleCounterView() {
     LOG.info("HsSingleCounterPage");
     WebAppTests.testPage(HsSingleCounterPage.class, AppContext.class,
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java
index 0452406..2ce6c5d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java
@@ -101,13 +101,15 @@
     final Map<JobId, Job> partialJobs;
     final Map<JobId, Job> fullJobs;
     final long startTime = System.currentTimeMillis();
-
-    TestAppContext(int appid, int numJobs, int numTasks, int numAttempts) {
+    
+    TestAppContext(int appid, int numJobs, int numTasks, int numAttempts,
+        boolean hasFailedTasks) {
       appID = MockJobs.newAppID(appid);
       appAttemptID = MockJobs.newAppAttemptID(appID, 0);
       JobsPair jobs;
       try {
-        jobs = MockHistoryJobs.newHistoryJobs(appID, numJobs, numTasks, numAttempts);
+        jobs = MockHistoryJobs.newHistoryJobs(appID, numJobs, numTasks,
+            numAttempts, hasFailedTasks);
       } catch (IOException e) {
         throw new YarnException(e);
       }
@@ -115,6 +117,10 @@
       fullJobs = jobs.full;
     }
 
+    TestAppContext(int appid, int numJobs, int numTasks, int numAttempts) {
+      this(appid, numJobs, numTasks, numAttempts, false);
+    }
+
     TestAppContext() {
       this(0, 1, 2, 1);
     }
@@ -628,6 +634,46 @@
       verifyHsJobCounters(info, jobsMap.get(id));
     }
   }
+  
+  @Test
+  public void testJobCountersForKilledJob() throws Exception {
+    WebResource r = resource();
+    appContext = new TestAppContext(0, 1, 1, 1, true);
+    injector = Guice.createInjector(new ServletModule() {
+      @Override
+      protected void configureServlets() {
+
+        webApp = mock(HsWebApp.class);
+        when(webApp.name()).thenReturn("hsmockwebapp");
+
+        bind(JAXBContextResolver.class);
+        bind(HsWebServices.class);
+        bind(GenericExceptionHandler.class);
+        bind(WebApp.class).toInstance(webApp);
+        bind(AppContext.class).toInstance(appContext);
+        bind(HistoryContext.class).toInstance(appContext);
+        bind(Configuration.class).toInstance(conf);
+
+        serve("/*").with(GuiceContainer.class);
+      }
+    });
+    
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("history")
+          .path("mapreduce").path("jobs").path(jobId).path("counters/")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("jobCounters");
+      WebServicesTestUtils.checkStringMatch("id", MRApps.toString(id),
+          info.getString("id"));
+      assertTrue("Job shouldn't contain any counters", info.length() == 1);
+    }
+  }
 
   @Test
   public void testJobCountersDefault() throws JSONException, Exception {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index e3853e9..2c2370b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
index bb287cf..20d27fb 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
@@ -230,7 +230,7 @@
       "[-verbose]";
     
     String jarFile = null;
-    int inputLines = 1; 
+    long inputLines = 1; 
     int numRuns = 1;
     int numMaps = 2; 
     int numReduces = 1;
@@ -248,7 +248,7 @@
       } else if (args[i].equals("-reduces")) {
         numReduces = Integer.parseInt(args[++i]);
       } else if (args[i].equals("-inputLines")) {
-        inputLines = Integer.parseInt(args[++i]);
+        inputLines = Long.parseLong(args[++i]);
       } else if (args[i].equals("-inputType")) {
         String s = args[++i]; 
         if (s.equalsIgnoreCase("ascending")) {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBlockLimits.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBlockLimits.java
new file mode 100644
index 0000000..d8b250a
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBlockLimits.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * A JUnit test for the limit on block locations per split
+ */
+public class TestBlockLimits extends TestCase {
+  private static String TEST_ROOT_DIR = new File(System.getProperty(
+      "test.build.data", "/tmp")).toURI().toString().replace(' ', '+');
+
+  public void testWithLimits() throws IOException, InterruptedException,
+      ClassNotFoundException {
+    MiniMRClientCluster mr = null;
+    try {
+      mr = MiniMRClientClusterFactory.create(this.getClass(), 2,
+          new Configuration());
+      runCustomFormat(mr);
+    } finally {
+      if (mr != null) {
+        mr.stop();
+      }
+    }
+  }
+
+  private void runCustomFormat(MiniMRClientCluster mr) throws IOException {
+    JobConf job = new JobConf(mr.getConfig());
+    FileSystem fileSys = FileSystem.get(job);
+    Path testDir = new Path(TEST_ROOT_DIR + "/test_mini_mr_local");
+    Path outDir = new Path(testDir, "out");
+    System.out.println("testDir= " + testDir);
+    fileSys.delete(testDir, true);
+    job.setInputFormat(MyInputFormat.class);
+    job.setOutputFormat(MyOutputFormat.class);
+    job.setOutputKeyClass(Text.class);
+    job.setOutputValueClass(IntWritable.class);
+
+    job.setMapperClass(MyMapper.class);
+    job.setReducerClass(MyReducer.class);
+    job.setNumMapTasks(100);
+    job.setNumReduceTasks(1);
+    job.set("non.std.out", outDir.toString());
+    try {
+      JobClient.runJob(job);
+      fail("Expected the job to fail: splits exceed the max block location limit");
+    } catch (IOException ie) {
+      System.out.println("Failed job " + StringUtils.stringifyException(ie));
+    } finally {
+      fileSys.delete(testDir, true);
+    }
+
+  }
+
+  static class MyMapper extends MapReduceBase implements
+      Mapper<WritableComparable, Writable, WritableComparable, Writable> {
+
+    public void map(WritableComparable key, Writable value,
+        OutputCollector<WritableComparable, Writable> out, Reporter reporter)
+        throws IOException {
+    }
+  }
+
+  static class MyReducer extends MapReduceBase implements
+      Reducer<WritableComparable, Writable, WritableComparable, Writable> {
+    public void reduce(WritableComparable key, Iterator<Writable> values,
+        OutputCollector<WritableComparable, Writable> output, Reporter reporter)
+        throws IOException {
+    }
+  }
+
+  private static class MyInputFormat implements InputFormat<IntWritable, Text> {
+
+    private static class MySplit implements InputSplit {
+      int first;
+      int length;
+
+      public MySplit() {
+      }
+
+      public MySplit(int first, int length) {
+        this.first = first;
+        this.length = length;
+      }
+
+      public String[] getLocations() {
+        return new String[200];
+      }
+
+      public long getLength() {
+        return length;
+      }
+
+      public void write(DataOutput out) throws IOException {
+        WritableUtils.writeVInt(out, first);
+        WritableUtils.writeVInt(out, length);
+      }
+
+      public void readFields(DataInput in) throws IOException {
+        first = WritableUtils.readVInt(in);
+        length = WritableUtils.readVInt(in);
+      }
+    }
+
+    public InputSplit[] getSplits(JobConf job, int numSplits)
+        throws IOException {
+      return new MySplit[] { new MySplit(0, 1), new MySplit(1, 3),
+          new MySplit(4, 2) };
+    }
+
+    public RecordReader<IntWritable, Text> getRecordReader(InputSplit split,
+        JobConf job, Reporter reporter) throws IOException {
+      return null;
+    }
+
+  }
+
+  static class MyOutputFormat implements OutputFormat {
+    static class MyRecordWriter implements RecordWriter<Object, Object> {
+
+      public MyRecordWriter(Path outputFile, JobConf job) throws IOException {
+      }
+
+      public void write(Object key, Object value) throws IOException {
+        return;
+      }
+
+      public void close(Reporter reporter) throws IOException {
+      }
+    }
+
+    public RecordWriter getRecordWriter(FileSystem ignored, JobConf job,
+        String name, Progressable progress) throws IOException {
+      return new MyRecordWriter(new Path(job.get("non.std.out")), job);
+    }
+
+    public void checkOutputSpecs(FileSystem ignored, JobConf job)
+        throws IOException {
+    }
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReporter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReporter.java
index 43b1a1d..48df092 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReporter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReporter.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.mapred;
 
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.Iterator;
 
@@ -25,10 +26,15 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+
 import static org.junit.Assert.*;
 
 /**
@@ -98,7 +104,28 @@
                    progressRange, reporter.getProgress(), 0f);
     }
   }
-  
+
+  static class StatusLimitMapper extends
+      org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, Text, Text> {
+
+    @Override
+    public void map(LongWritable key, Text value, Context context)
+        throws IOException {
+      StringBuilder sb = new StringBuilder(512);
+      for (int i = 0; i < 1000; i++) {
+        sb.append("a");
+      }
+      context.setStatus(sb.toString());
+      int progressStatusLength = context.getConfiguration().getInt(
+          MRConfig.PROGRESS_STATUS_LEN_LIMIT_KEY,
+          MRConfig.PROGRESS_STATUS_LEN_LIMIT_DEFAULT);
+
+      if (context.getStatus().length() > progressStatusLength) {
+        throw new IOException("Status is not truncated");
+      }
+    }
+  }
+
   /**
    * Test {@link Reporter}'s progress for a map-only job.
    * This will make sure that only the map phase decides the attempt's progress.
@@ -166,7 +193,6 @@
   /**
    * Test {@link Reporter}'s progress for map-reduce job.
    */
-  @SuppressWarnings("deprecation")
   @Test
   public void testReporterProgressForMRJob() throws IOException {
     Path test = new Path(testRootTempDir, "testReporterProgressForMRJob");
@@ -186,4 +212,39 @@
     
     assertTrue("Job failed", job.isSuccessful());
   }
+
+  @Test
+  public void testStatusLimit() throws IOException, InterruptedException,
+      ClassNotFoundException {
+    Path test = new Path(testRootTempDir, "testStatusLimit");
+
+    Configuration conf = new Configuration();
+    Path inDir = new Path(test, "in");
+    Path outDir = new Path(test, "out");
+    FileSystem fs = FileSystem.get(conf);
+    if (fs.exists(inDir)) {
+      fs.delete(inDir, true);
+    }
+    fs.mkdirs(inDir);
+    DataOutputStream file = fs.create(new Path(inDir, "part-" + 0));
+    file.writeBytes("testStatusLimit");
+    file.close();
+
+    if (fs.exists(outDir)) {
+      fs.delete(outDir, true);
+    }
+
+    Job job = Job.getInstance(conf, "testStatusLimit");
+
+    job.setMapperClass(StatusLimitMapper.class);
+    job.setNumReduceTasks(0);
+
+    FileInputFormat.addInputPath(job, inDir);
+    FileOutputFormat.setOutputPath(job, outDir);
+
+    job.waitForCompletion(true);
+
+    assertTrue("Job failed", job.isSuccessful());
+  }
+
 }
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java
index a7939e5..6f1e080 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java
@@ -30,7 +30,11 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.ClusterMapReduceTestCase;
+import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 import org.apache.hadoop.mapreduce.tools.CLI;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -65,6 +69,40 @@
     }
   }
 
+  private static class BadOutputFormat
+    extends TextOutputFormat {
+    @Override
+    public void checkOutputSpecs(JobContext job)
+        throws FileAlreadyExistsException, IOException {
+      throw new IOException();
+    }
+  }
+
+  @Test
+  public void testJobSubmissionSpecsAndFiles() throws Exception {
+    Configuration conf = createJobConf();
+    Job job = MapReduceTestUtil.createJob(conf,
+          getInputDir(), getOutputDir(), 1, 1);
+    job.setOutputFormatClass(BadOutputFormat.class);
+    try {
+      job.submit();
+      fail("Should've thrown an exception while checking output specs.");
+    } catch (Exception e) {
+      assertTrue(e instanceof IOException);
+    }
+    JobID jobId = job.getJobID();
+    Cluster cluster = new Cluster(conf);
+    Path jobStagingArea = JobSubmissionFiles.getStagingDir(
+        cluster,
+        job.getConfiguration());
+    Path submitJobDir = new Path(jobStagingArea, jobId.toString());
+    Path submitJobFile = JobSubmissionFiles.getJobConfPath(submitJobDir);
+    assertFalse(
+        "Shouldn't have created a job file if job specs failed.",
+        FileSystem.get(conf).exists(submitJobFile)
+    );
+  }
+
   @Test
   public void testJobClient() throws Exception {
     Configuration conf = createJobConf();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
index d3b4752..4ee4856 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
@@ -21,6 +21,8 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.Iterator;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -28,8 +30,11 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMNMInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.codehaus.jackson.JsonNode;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -37,6 +42,7 @@
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import static org.mockito.Mockito.*;
 
 public class TestRMNMInfo {
   private static final Log LOG = LogFactory.getLog(TestRMNMInfo.class);
@@ -116,14 +122,47 @@
               n.get("HealthStatus").getValueAsText().contains("Healthy"));
       Assert.assertNotNull(n.get("LastHealthUpdate"));
       Assert.assertNotNull(n.get("HealthReport"));
-      Assert.assertNotNull(n.get("NumContainersMB"));
+      Assert.assertNotNull(n.get("NumContainers"));
       Assert.assertEquals(
               n.get("NodeId") + ": Unexpected number of used containers",
-              0, n.get("NumContainersMB").getValueAsInt());
+              0, n.get("NumContainers").getValueAsInt());
       Assert.assertEquals(
               n.get("NodeId") + ": Unexpected amount of used memory",
               0, n.get("UsedMemoryMB").getValueAsInt());
       Assert.assertNotNull(n.get("AvailableMemoryMB"));
     }
   }
+  
+  @Test
+  public void testRMNMInfoMismatch() throws Exception {
+    RMContext rmc = mock(RMContext.class);
+    ResourceScheduler rms = mock(ResourceScheduler.class);
+    ConcurrentMap<NodeId, RMNode> map = new ConcurrentHashMap<NodeId, RMNode>();
+    RMNode node = MockNodes.newNodeInfo(1, MockNodes.newResource(4 * 1024));
+    map.put(node.getNodeID(), node);
+    when(rmc.getRMNodes()).thenReturn(map);
+    
+    RMNMInfo rmInfo = new RMNMInfo(rmc, rms);
+    String liveNMs = rmInfo.getLiveNodeManagers();
+    ObjectMapper mapper = new ObjectMapper();
+    JsonNode jn = mapper.readTree(liveNMs);
+    Assert.assertEquals("Unexpected number of live nodes:",
+        1, jn.size());
+    Iterator<JsonNode> it = jn.iterator();
+    while (it.hasNext()) {
+      JsonNode n = it.next();
+      Assert.assertNotNull(n.get("HostName"));
+      Assert.assertNotNull(n.get("Rack"));
+      Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",
+              n.get("State").getValueAsText().contains("RUNNING"));
+      Assert.assertNotNull(n.get("NodeHTTPAddress"));
+      Assert.assertTrue("Node " + n.get("NodeId") + " should be Healthy",
+              n.get("HealthStatus").getValueAsText().contains("Healthy"));
+      Assert.assertNotNull(n.get("LastHealthUpdate"));
+      Assert.assertNotNull(n.get("HealthReport"));
+      Assert.assertNull(n.get("NumContainers"));
+      Assert.assertNull(n.get("UsedMemoryMB"));
+      Assert.assertNull(n.get("AvailableMemoryMB"));
+    }
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
index 07f436e..995e3e8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index 5a96b31..f936762 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
@@ -114,8 +117,8 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <!-- needed for security and runtime -->
       <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>com.google.inject.extensions</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
index 3520839..03d0802 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
@@ -57,7 +60,7 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
-      <scope>provided</scope>
+      <scope>runtime</scope>
     </dependency>
     <dependency>
        <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java
index 034dce1..af5e370 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java
@@ -130,8 +130,8 @@
     public CombineFileLineRecordReader(CombineFileSplit split,
         TaskAttemptContext context, Integer index) throws IOException {
       
-      fs = FileSystem.get(context.getConfiguration());
       this.path = split.getPath(index);
+      fs = this.path.getFileSystem(context.getConfiguration());
       this.startOffset = split.getOffset(index);
       this.end = startOffset + split.getLength(index);
       boolean skipFirstLine = false;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
index 4ef0033..f957ad9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
@@ -212,7 +212,7 @@
     }
     FileSystem outFs = partFile.getFileSystem(conf);
     DataOutputStream writer = outFs.create(partFile, true, 64*1024, (short) 10, 
-                                           outFs.getDefaultBlockSize());
+                                           outFs.getDefaultBlockSize(partFile));
     for (int i = 0; i < samples; i++) {
       try {
         samplerReader[i].join();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-config.sh b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-config.sh
index 934a461..275869f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-config.sh
+++ b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-config.sh
@@ -49,7 +49,7 @@
 fi
  
 # Allow alternate conf dir location.
-YARN_CONF_DIR="${HADOOP_CONF_DIR:-$YARN_HOME/conf}"
+export YARN_CONF_DIR="${HADOOP_CONF_DIR:-$YARN_HOME/conf}"
 
 #check to see it is specified whether to use the slaves or the
 # masters file
diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh
index 89ae9d8..07326a1 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh
+++ b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh
@@ -93,6 +93,7 @@
 export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,RFA}
 log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
 pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
+YARN_STOP_TIMEOUT=${YARN_STOP_TIMEOUT:-5}
 
 # Set default scheduling priority
 if [ "$YARN_NICENESS" = "" ]; then
@@ -128,9 +129,15 @@
   (stop)
 
     if [ -f $pid ]; then
-      if kill -0 `cat $pid` > /dev/null 2>&1; then
+      TARGET_PID=`cat $pid`
+      if kill -0 $TARGET_PID > /dev/null 2>&1; then
         echo stopping $command
-        kill `cat $pid`
+        kill $TARGET_PID
+        sleep $YARN_STOP_TIMEOUT
+        if kill -0 $TARGET_PID > /dev/null 2>&1; then
+          echo "$command did not stop gracefully after $YARN_STOP_TIMEOUT seconds: killing with kill -9"
+          kill -9 $TARGET_PID
+        fi
       else
         echo no $command to stop
       fi
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 462ecc3..ff8a548 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project xmlns:pom="http://maven.apache.org/POM/4.0.0">
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index 37a5e99..babaa34 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-yarn-applications</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
index fd51584..b396bc3 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index deb96c0..8d8347d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 0000000..fc669de
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1,4 @@
+org.apache.hadoop.yarn.security.ContainerTokenIdentifier
+org.apache.hadoop.yarn.security.ApplicationTokenIdentifier
+org.apache.hadoop.yarn.security.client.ClientTokenIdentifier
+org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier
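
This new META-INF/services file registers the YARN token identifier classes for discovery through the standard JDK ServiceLoader mechanism; each line names one implementation. A hedged sketch of how such a registration is typically consumed (the exact Hadoop call sites may differ):

import java.util.ServiceLoader;
import org.apache.hadoop.security.token.TokenIdentifier;

public class ListRegisteredTokenIdentifiers {
  public static void main(String[] args) {
    // Iterates every implementation listed in META-INF/services files on the classpath.
    for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
      System.out.println(id.getClass().getName() + " -> kind " + id.getKind());
    }
  }
}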
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 2dd4277..bb9b1b2 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index cf5d7ff..fb3c97b 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index 7d4de87..4ce630a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -68,8 +68,10 @@
    */
   public void delete(String user, Path subDir, Path... baseDirs) {
     // TODO if parent owned by NM, rename within parent inline
-    sched.schedule(new FileDeletion(user, subDir, baseDirs),
-        debugDelay, TimeUnit.SECONDS);
+    if (debugDelay != -1) {
+      sched.schedule(new FileDeletion(user, subDir, baseDirs), debugDelay,
+          TimeUnit.SECONDS);
+    }
   }
 
   @Override
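
With the guard above, a debug delay of -1 now means the FileDeletion task is never scheduled at all, so container directories survive for post-mortem inspection; the new testNoDelete later in this patch exercises exactly that. A hedged configuration sketch, assuming the same key the test uses:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class KeepNodeManagerDirsSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // -1 disables the DeletionService's scheduled deletions entirely.
    conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, -1);
    System.out.println(conf.getInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0));
  }
}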
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index ba3e53e..8209c1f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -181,7 +181,7 @@
 
   private void registerWithRM() throws YarnRemoteException {
     this.resourceTracker = getRMClient();
-    LOG.info("Connected to ResourceManager at " + this.rmAddress);
+    LOG.info("Connecting to ResourceManager at " + this.rmAddress);
     
     RegisterNodeManagerRequest request = recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
     request.setHttpPort(this.httpPort);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index d82186f..4e0e0b4 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -74,6 +74,7 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationContainerInitEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationFinishEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationInitEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
@@ -524,8 +525,8 @@
           (CMgrCompletedAppsEvent) event;
       for (ApplicationId appID : appsFinishedEvent.getAppsToCleanup()) {
         this.dispatcher.getEventHandler().handle(
-            new ApplicationEvent(appID,
-                ApplicationEventType.FINISH_APPLICATION));
+            new ApplicationFinishEvent(appID,
+                "Application Killed by ResourceManager"));
       }
       break;
     case FINISH_CONTAINERS:
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEventType.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEventType.java
index f988a3e..24c9a13 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEventType.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEventType.java
@@ -23,7 +23,7 @@
   // Source: ContainerManager
   INIT_APPLICATION,
   INIT_CONTAINER,
-  FINISH_APPLICATION,
+  FINISH_APPLICATION, // Source: LogAggregationService if init fails
 
   // Source: ResourceLocalizationService
   APPLICATION_INITED,
@@ -33,5 +33,6 @@
   APPLICATION_CONTAINER_FINISHED,
 
   // Source: Log Handler
+  APPLICATION_LOG_HANDLING_INITED,
   APPLICATION_LOG_HANDLING_FINISHED
 }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationFinishEvent.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationFinishEvent.java
new file mode 100644
index 0000000..0fbdca8
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationFinishEvent.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.application;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+/**
+ * Finish/abort event
+ */
+public class ApplicationFinishEvent extends ApplicationEvent {
+  private final String diagnostic;
+
+  /**
+   * Application event to abort all containers associated with the app
+   * @param appId the application whose containers should be aborted
+   * @param diagnostic reason for the abort
+   */
+  public ApplicationFinishEvent(ApplicationId appId, String diagnostic) {
+    super(appId, ApplicationEventType.FINISH_APPLICATION);
+    this.diagnostic = diagnostic;
+  }
+
+  /**
+   * Why the app was aborted
+   * @return diagnostic message
+   */
+  public String getDiagnostic() {
+    return diagnostic;
+  }
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
index d1bcaf2..2c61b70 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -141,6 +141,9 @@
                    ApplicationState.APPLICATION_RESOURCES_CLEANINGUP),
                ApplicationEventType.FINISH_APPLICATION,
                new AppFinishTriggeredTransition())
+           .addTransition(ApplicationState.INITING, ApplicationState.INITING,
+               ApplicationEventType.APPLICATION_LOG_HANDLING_INITED,
+               new AppLogInitDoneTransition())
            .addTransition(ApplicationState.INITING, ApplicationState.RUNNING,
                ApplicationEventType.APPLICATION_INITED,
                new AppInitDoneTransition())
@@ -192,8 +195,7 @@
   /**
    * Notify services of new application.
    * 
-   * In particular, this requests that the {@link ResourceLocalizationService}
-   * localize the application-scoped resources.
+   * In particular, this initializes the {@link LogAggregationService}.
    */
   @SuppressWarnings("unchecked")
   static class AppInitTransition implements
@@ -203,6 +205,27 @@
       ApplicationInitEvent initEvent = (ApplicationInitEvent)event;
       app.applicationACLs = initEvent.getApplicationACLs();
       app.aclsManager.addApplication(app.getAppId(), app.applicationACLs);
+      // Inform the logAggregator
+      app.dispatcher.getEventHandler().handle(
+          new LogHandlerAppStartedEvent(app.appId, app.user,
+              app.credentials, ContainerLogsRetentionPolicy.ALL_CONTAINERS,
+              app.applicationACLs)); 
+    }
+  }
+
+  /**
+   * Handles the APPLICATION_LOG_HANDLING_INITED event that occurs after
+   * {@link LogAggregationService} has created the directories for the app
+   * and started the aggregation thread for the app.
+   * 
+   * In particular, this requests that the {@link ResourceLocalizationService}
+   * localize the application-scoped resources.
+   */
+  @SuppressWarnings("unchecked")
+  static class AppLogInitDoneTransition implements
+      SingleArcTransition<ApplicationImpl, ApplicationEvent> {
+    @Override
+    public void transition(ApplicationImpl app, ApplicationEvent event) {
       app.dispatcher.getEventHandler().handle(
           new ApplicationLocalizationEvent(
               LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
@@ -248,13 +271,6 @@
       SingleArcTransition<ApplicationImpl, ApplicationEvent> {
     @Override
     public void transition(ApplicationImpl app, ApplicationEvent event) {
-
-      // Inform the logAggregator
-      app.dispatcher.getEventHandler().handle(
-          new LogHandlerAppStartedEvent(app.appId, app.user,
-              app.credentials, ContainerLogsRetentionPolicy.ALL_CONTAINERS,
-              app.applicationACLs)); 
-
       // Start all the containers waiting for ApplicationInit
       for (Container container : app.containers.values()) {
         app.dispatcher.getEventHandler().handle(new ContainerInitEvent(
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
index f7cf6c5..d00c61e 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
@@ -49,6 +49,9 @@
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationFinishEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
@@ -56,6 +59,7 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent;
 import org.apache.hadoop.yarn.service.AbstractService;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 public class LogAggregationService extends AbstractService implements
@@ -146,13 +150,13 @@
     try {
       remoteFS = FileSystem.get(conf);
     } catch (IOException e) {
-      throw new YarnException("Unable to get Remote FileSystem isntance", e);
+      throw new YarnException("Unable to get Remote FileSystem instance", e);
     }
     boolean remoteExists = false;
     try {
       remoteExists = remoteFS.exists(this.remoteRootLogDir);
     } catch (IOException e) {
-      throw new YarnException("Failed to check for existance of remoteLogDir ["
+      throw new YarnException("Failed to check for existence of remoteLogDir ["
           + this.remoteRootLogDir + "]");
     }
     if (remoteExists) {
@@ -266,9 +270,26 @@
     }
   }
 
+  @SuppressWarnings("unchecked")
   private void initApp(final ApplicationId appId, String user,
       Credentials credentials, ContainerLogsRetentionPolicy logRetentionPolicy,
       Map<ApplicationAccessType, String> appAcls) {
+    ApplicationEvent eventResponse;
+    try {
+      initAppAggregator(appId, user, credentials, logRetentionPolicy, appAcls);
+      eventResponse = new ApplicationEvent(appId,
+          ApplicationEventType.APPLICATION_LOG_HANDLING_INITED);
+    } catch (YarnException e) {
+      eventResponse = new ApplicationFinishEvent(appId,
+          "Application failed to init aggregation: " + e.getMessage());
+    }
+    this.dispatcher.getEventHandler().handle(eventResponse);
+  }
+
+  @VisibleForTesting
+  public void initAppAggregator(final ApplicationId appId, String user,
+      Credentials credentials, ContainerLogsRetentionPolicy logRetentionPolicy,
+      Map<ApplicationAccessType, String> appAcls) {
 
     // Get user's FileSystem credentials
     UserGroupInformation userUgi =
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
index a90912e..3d5ad68 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
@@ -93,6 +93,7 @@
     super.stop();
   }
 
+  @SuppressWarnings("unchecked")
   @Override
   public void handle(LogHandlerEvent event) {
     switch (event.getType()) {
@@ -101,6 +102,9 @@
             (LogHandlerAppStartedEvent) event;
         this.appOwners.put(appStartedEvent.getApplicationId(),
             appStartedEvent.getUser());
+        this.dispatcher.getEventHandler().handle(
+            new ApplicationEvent(appStartedEvent.getApplicationId(),
+                ApplicationEventType.APPLICATION_LOG_HANDLING_INITED));
         break;
       case CONTAINER_FINISHED:
         // Ignore
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 0000000..6ed6e32
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1 @@
+org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenIdentifier
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
index 28b51c0..d91b3ac 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
@@ -164,6 +164,39 @@
   }
 
   @Test
+  public void testNoDelete() throws Exception {
+    Random r = new Random();
+    long seed = r.nextLong();
+    r.setSeed(seed);
+    System.out.println("SEED: " + seed);
+    List<Path> dirs = buildDirs(r, base, 20);
+    createDirs(new Path("."), dirs);
+    FakeDefaultContainerExecutor exec = new FakeDefaultContainerExecutor();
+    Configuration conf = new Configuration();
+    conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, -1);
+    exec.setConf(conf);
+    DeletionService del = new DeletionService(exec);
+    del.init(conf);
+    del.start();
+    try {
+      for (Path p : dirs) {
+        del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p,
+            null);
+      }
+      int msecToWait = 20 * 1000;
+      for (Path p : dirs) {
+        while (msecToWait > 0 && lfs.util().exists(p)) {
+          Thread.sleep(100);
+          msecToWait -= 100;
+        }
+        assertTrue(lfs.util().exists(p));
+      }
+    } finally {
+      del.stop();
+    }
+  }
+
+  @Test
   public void testStopWithDelayedTasks() throws Exception {
     DeletionService del = new DeletionService(Mockito.mock(ContainerExecutor.class));
     Configuration conf = new YarnConfiguration();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index ab40335..2d30031 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -18,10 +18,7 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
 
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.*;
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertTrue;
 
@@ -32,6 +29,7 @@
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.Writer;
+import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -47,6 +45,8 @@
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -61,6 +61,7 @@
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -74,6 +75,7 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationFinishEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;
@@ -81,6 +83,7 @@
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.mortbay.util.MultiException;
 
 
 //@Ignore
@@ -112,7 +115,7 @@
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testLocalFileDeletionAfterUpload() throws IOException {
+  public void testLocalFileDeletionAfterUpload() throws Exception {
     this.delSrvc = new DeletionService(createContainerExecutor());
     this.delSrvc.init(conf);
     this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
@@ -170,19 +173,23 @@
         logFilePath.toUri().getPath()).exists());
     
     dispatcher.await();
-    ArgumentCaptor<ApplicationEvent> eventCaptor =
-        ArgumentCaptor.forClass(ApplicationEvent.class);
-    verify(appEventHandler).handle(eventCaptor.capture());
-    assertEquals(ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED,
-        eventCaptor.getValue().getType());
-    assertEquals(appAttemptId.getApplicationId(), eventCaptor.getValue()
-        .getApplicationID());
     
+    ApplicationEvent expectedEvents[] = new ApplicationEvent[]{
+        new ApplicationEvent(
+            appAttemptId.getApplicationId(),
+            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
+        new ApplicationEvent(
+            appAttemptId.getApplicationId(),
+            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
+    };
+
+    checkEvents(appEventHandler, expectedEvents, true, "getType", "getApplicationID");
+    dispatcher.stop();
   }
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testNoContainerOnNode() {
+  public void testNoContainerOnNode() throws Exception {
     this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
     this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
         this.remoteRootLogDir.getAbsolutePath());
@@ -218,19 +225,22 @@
         .exists());
     
     dispatcher.await();
-    ArgumentCaptor<ApplicationEvent> eventCaptor =
-        ArgumentCaptor.forClass(ApplicationEvent.class);
-    verify(appEventHandler).handle(eventCaptor.capture());
-    assertEquals(ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED,
-        eventCaptor.getValue().getType());
-    verify(appEventHandler).handle(eventCaptor.capture());
-    assertEquals(application1, eventCaptor.getValue()
-        .getApplicationID());
+    
+    ApplicationEvent expectedEvents[] = new ApplicationEvent[]{
+        new ApplicationEvent(
+            application1,
+            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
+        new ApplicationEvent(
+            application1,
+            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
+    };
+    checkEvents(appEventHandler, expectedEvents, true, "getType", "getApplicationID");
+    dispatcher.stop();
   }
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testMultipleAppsLogAggregation() throws IOException {
+  public void testMultipleAppsLogAggregation() throws Exception {
 
     this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
     this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
@@ -299,9 +309,22 @@
     app3LogDir.mkdir();
     logAggregationService.handle(new LogHandlerAppStartedEvent(application3,
         this.user, null,
-        ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY, this.acls));
-        
+        ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY, this.acls));        
 
+    ApplicationEvent expectedInitEvents[] = new ApplicationEvent[]{
+        new ApplicationEvent(
+            application1,
+            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
+        new ApplicationEvent(
+            application2,
+            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
+        new ApplicationEvent(
+            application3,
+            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED)
+    };
+    checkEvents(appEventHandler, expectedInitEvents, false, "getType", "getApplicationID");
+    reset(appEventHandler);
+    
     ContainerId container31 = BuilderUtils.newContainerId(appAttemptId3, 1);
     writeContainerLogs(app3LogDir, container31);
     logAggregationService.handle(
@@ -339,22 +362,59 @@
         new ContainerId[] { container31, container32 });
     
     dispatcher.await();
-    ArgumentCaptor<ApplicationEvent> eventCaptor =
-        ArgumentCaptor.forClass(ApplicationEvent.class);
-
-    verify(appEventHandler, times(3)).handle(eventCaptor.capture());
-    List<ApplicationEvent> capturedEvents = eventCaptor.getAllValues();
-    Set<ApplicationId> appIds = new HashSet<ApplicationId>();
-    for (ApplicationEvent cap : capturedEvents) {
-      assertEquals(ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED,
-          eventCaptor.getValue().getType());
-      appIds.add(cap.getApplicationID());
-    }
-    assertTrue(appIds.contains(application1));
-    assertTrue(appIds.contains(application2));
-    assertTrue(appIds.contains(application3));
+    
+    ApplicationEvent[] expectedFinishedEvents = new ApplicationEvent[]{
+        new ApplicationEvent(
+            application1,
+            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),
+        new ApplicationEvent(
+            application2,
+            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),
+        new ApplicationEvent(
+            application3,
+            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
+    };
+    checkEvents(appEventHandler, expectedFinishedEvents, false, "getType", "getApplicationID");
+    dispatcher.stop();
   }
-
+  
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testLogAggregationInitFailsWithoutKillingNM() throws Exception {
+    
+    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
+    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
+        this.remoteRootLogDir.getAbsolutePath());
+    
+    DrainDispatcher dispatcher = createDispatcher();
+    EventHandler<ApplicationEvent> appEventHandler = mock(EventHandler.class);
+    dispatcher.register(ApplicationEventType.class, appEventHandler);
+    
+    LogAggregationService logAggregationService = spy(
+        new LogAggregationService(dispatcher, this.context, this.delSrvc,
+                                  super.dirsHandler));
+    logAggregationService.init(this.conf);
+    logAggregationService.start();
+    
+    ApplicationId appId = BuilderUtils.newApplicationId(
+        System.currentTimeMillis(), (int) (Math.random() * Integer.MAX_VALUE));
+    doThrow(new YarnException("KABOOM!"))
+      .when(logAggregationService).initAppAggregator(
+          eq(appId), eq(user), any(Credentials.class),
+          any(ContainerLogsRetentionPolicy.class), anyMap());
+    
+    logAggregationService.handle(new LogHandlerAppStartedEvent(appId,
+        this.user, null,
+        ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY, this.acls));        
+    
+    dispatcher.await();
+    ApplicationEvent expectedEvents[] = new ApplicationEvent[]{
+        new ApplicationFinishEvent(appId, "Application failed to init aggregation: KABOOM!")
+    };
+    checkEvents(appEventHandler, expectedEvents, false,
+        "getType", "getApplicationID", "getDiagnostic");    
+  }
+  
   private void writeContainerLogs(File appLogDir, ContainerId containerId)
       throws IOException {
     // ContainerLogDir should be created
@@ -599,4 +659,77 @@
     Assert.assertEquals("Log aggregator failed to cleanup!", 0,
         logAggregationService.getNumAggregators());
   }
+  
+  @SuppressWarnings("unchecked")
+  private static <T extends Event<?>>
+  void checkEvents(EventHandler<T> eventHandler,
+                   T expectedEvents[], boolean inOrder,
+                   String... methods) throws Exception {
+    Class<T> genericClass = (Class<T>)expectedEvents.getClass().getComponentType();
+    ArgumentCaptor<T> eventCaptor = ArgumentCaptor.forClass(genericClass);
+    // the captor won't work unless used via a verify
+    verify(eventHandler, atLeast(0)).handle(eventCaptor.capture());
+    List<T> actualEvents = eventCaptor.getAllValues();
+
+    // batch up exceptions so junit presents them as one
+    MultiException failures = new MultiException();
+    try {
+      assertEquals("expected events", expectedEvents.length, actualEvents.size());
+    } catch (Throwable e) {
+      failures.add(e);
+    }
+    if (inOrder) {
+      // sequentially verify the events
+      int len = Math.max(expectedEvents.length, actualEvents.size());
+      for (int n=0; n < len; n++) {
+        try {
+          String expect = (n < expectedEvents.length)
+              ? eventToString(expectedEvents[n], methods) : null;
+          String actual = (n < actualEvents.size())
+              ? eventToString(actualEvents.get(n), methods) : null;
+          assertEquals("event#"+n, expect, actual);
+        } catch (Throwable e) {
+          failures.add(e);
+        }
+      }
+    } else {
+      // verify that every actual event was expected
+      // and that every expected event was actually seen
+      Set<String> expectedSet = new HashSet<String>();
+      for (T expectedEvent : expectedEvents) {
+        expectedSet.add(eventToString(expectedEvent, methods));
+      }
+      for (T actualEvent : actualEvents) {
+        try {
+          String actual = eventToString(actualEvent, methods);
+          assertTrue("unexpected event: "+actual, expectedSet.remove(actual));
+        } catch (Throwable e) {
+          failures.add(e);
+        }
+      }
+      for (String expected : expectedSet) {
+        try {
+          Assert.fail("missing event: "+expected);
+        } catch (Throwable e) {
+          failures.add(e);
+        }
+      }
+    }
+    failures.ifExceptionThrow();
+  }
+  
+  private static String eventToString(Event<?> event, String[] methods) throws Exception {
+    StringBuilder sb = new StringBuilder("[ ");
+    for (String m : methods) {
+      try {
+        Method method = event.getClass().getMethod(m);
+        String value = method.invoke(event).toString();
+        sb.append(method.getName()).append("=").append(value).append(" ");
+      } catch (Exception e) {
+        // ignore, actual event may not implement the method...
+      }
+    }
+    sb.append("]");
+    return sb.toString();
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index fd7b767..efe126e 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
index 1641381..33c79f6 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
@@ -27,6 +27,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.HostsFileReader;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -37,6 +38,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.service.AbstractService;
 
+@SuppressWarnings("unchecked")
 public class NodesListManager extends AbstractService implements
     EventHandler<NodesListManagerEvent> {
 
@@ -112,8 +114,10 @@
     synchronized (hostsReader) {
       Set<String> hostsList = hostsReader.getHosts();
       Set<String> excludeList = hostsReader.getExcludedHosts();
-      return ((hostsList.isEmpty() || hostsList.contains(hostName)) && 
-          !excludeList.contains(hostName));
+      String ip = NetUtils.normalizeHostName(hostName);
+      return (hostsList.isEmpty() || hostsList.contains(hostName)
+          || hostsList.contains(ip))
+          && !(excludeList.contains(hostName) || excludeList.contains(ip));
     }
   }
   
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
index 34d2035..0db42e4 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
@@ -93,10 +93,12 @@
                         ni.getNodeHealthStatus().getLastHealthReportTime());
         info.put("HealthReport",
                         ni.getNodeHealthStatus().getHealthReport());
-        info.put("NumContainersMB", report.getNumContainers());
-        info.put("UsedMemoryMB", report.getUsedResource().getMemory());
-        info.put("AvailableMemoryMB",
-                                report.getAvailableResource().getMemory());
+        if(report != null) {
+          info.put("NumContainers", report.getNumContainers());
+          info.put("UsedMemoryMB", report.getUsedResource().getMemory());
+          info.put("AvailableMemoryMB",
+              report.getAvailableResource().getMemory());
+        }
 
         nodesInfo.add(info);
     }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
index 46df0c7..63ff844 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -20,6 +20,8 @@
 
 import java.util.Collection;
 
+import java.util.Map;
+
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -94,6 +96,13 @@
   RMAppAttempt getCurrentAppAttempt();
 
   /**
+   * An {@link RMApp} can have multiple application attempts ({@link RMAppAttempt}s).
+   * This method returns all the {@link RMAppAttempt}s for the RMApp.
+   * @return all {@link RMAppAttempt}s for the RMApp.
+   */
+  Map<ApplicationAttemptId, RMAppAttempt> getAppAttempts();
+
+  /**
    * To get the status of an application in the RM, this method can be used.
    * If full access is not allowed then the following fields in the report
    * will be stubbed:
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 43fc991..1cf1ca2 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
 
 import java.util.Collection;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
@@ -312,6 +313,17 @@
   }
 
   @Override
+  public Map<ApplicationAttemptId, RMAppAttempt> getAppAttempts() {
+    this.readLock.lock();
+
+    try {
+      return Collections.unmodifiableMap(this.attempts);
+    } finally {
+      this.readLock.unlock();
+    }
+  }
+
+  @Override
   public ApplicationStore getApplicationStore() {
     return this.appStore;
   }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
index 7c5d181..57d78e9 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
@@ -145,9 +145,15 @@
    */
   ApplicationSubmissionContext getSubmissionContext();
 
-  /*
+  /**
    * Get application container and resource usage information.
    * @return an ApplicationResourceUsageReport object.
    */
   ApplicationResourceUsageReport getApplicationResourceUsageReport();
+
+  /**
+   * Get the start time of the application.
+   * @return the start time of the application.
+   */
+  long getStartTime();
 }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 334f797..151c815 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -119,7 +119,8 @@
   private int rpcPort;
   private String origTrackingUrl = "N/A";
   private String proxiedTrackingUrl = "N/A";
-  
+  private long startTime = 0;
+
   // Set to null initially. Will eventually get set 
   // if an RMAppAttemptUnregistrationEvent occurs
   private FinalApplicationStatus finalStatus = null;
@@ -543,6 +544,8 @@
     public void transition(RMAppAttemptImpl appAttempt,
         RMAppAttemptEvent event) {
 
+      appAttempt.startTime = System.currentTimeMillis();
+
       // Register with the ApplicationMasterService
       appAttempt.masterService
           .registerAppAttempt(appAttempt.applicationAttemptId);
@@ -912,4 +915,14 @@
       return RMAppAttemptState.RUNNING;
     }
   }
+
+  @Override
+  public long getStartTime() {
+    this.readLock.lock();
+    try {
+      return this.startTime;
+    } finally {
+      this.readLock.unlock();
+    }
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 936e65e..bd44fcf 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -55,7 +55,7 @@
   @Metric("# of pending apps") MutableGaugeInt appsPending;
   @Metric("# of apps completed") MutableCounterInt appsCompleted;
   @Metric("# of apps killed") MutableCounterInt appsKilled;
-  @Metric("# of apps failed") MutableCounterInt appsFailed;
+  @Metric("# of apps failed") MutableGaugeInt appsFailed;
 
   @Metric("Allocated memory in MB") MutableGaugeInt allocatedMB;
   @Metric("# of allocated containers") MutableGaugeInt allocatedContainers;
@@ -181,15 +181,19 @@
     registry.snapshot(collector.addRecord(registry.info()), all);
   }
 
-  public void submitApp(String user) {
-    appsSubmitted.incr();
+  public void submitApp(String user, int attemptId) {
+    if (attemptId == 1) {
+      appsSubmitted.incr();
+    } else {
+      appsFailed.decr();
+    }
     appsPending.incr();
     QueueMetrics userMetrics = getUserMetrics(user);
     if (userMetrics != null) {
-      userMetrics.submitApp(user);
+      userMetrics.submitApp(user, attemptId);
     }
     if (parent != null) {
-      parent.submitApp(user);
+      parent.submitApp(user, attemptId);
     }
   }
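
The attemptId-aware submitApp above goes together with appsFailed changing from MutableCounterInt to MutableGaugeInt earlier in this file's diff: a counter is monotonic, but a retry (attemptId > 1) has to take back the failure recorded for the previous attempt. A hedged sketch of the counter/gauge distinction using the metrics2 library, with illustrative names:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;

public class CounterVsGaugeSketch {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("QueueMetricsSketch");
    MutableCounterInt failedCounter = registry.newCounter("failedCounter", "monotonic", 0);
    MutableGaugeInt failedGauge = registry.newGauge("failedGauge", "can move both ways", 0);
    failedCounter.incr();   // counters only increase
    failedGauge.incr();     // record a failed attempt
    failedGauge.decr();     // take it back when the app is retried
  }
}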
 
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 2256799..75d5249 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -632,9 +632,7 @@
     }
 
     int attemptId = application.getApplicationAttemptId().getAttemptId();
-    if (attemptId == 1) {
-      metrics.submitApp(userName);
-    }
+    metrics.submitApp(userName, attemptId);
 
     // Inform the parent queue
     try {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index cb83e49..a33a37d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -302,9 +302,7 @@
         new SchedulerApp(appAttemptId, user, DEFAULT_QUEUE, activeUsersManager,
             this.rmContext, null);
     applications.put(appAttemptId, schedulerApp);
-    if (appAttemptId.getAttemptId() == 1) {
-      metrics.submitApp(user);
-    }
+    metrics.submitApp(user, appAttemptId.getAttemptId());
     LOG.info("Application Submission: " + appAttemptId.getApplicationId() + 
         " from " + user + ", currently active: " + applications.size());
     rmContext.getDispatcher().getEventHandler().handle(
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
index 54ac79b..3dcd2f0 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
@@ -20,6 +20,13 @@
 
 import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
+
+
+import java.util.Collection;
 
 import com.google.inject.Inject;
 
@@ -29,19 +36,23 @@
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.yarn.util.Times;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
-import org.apache.hadoop.yarn.webapp.ResponseInfo;
 
 public class AppBlock extends HtmlBlock {
 
   private ApplicationACLsManager aclsManager;
-  
+
   @Inject
   AppBlock(ResourceManager rm, ViewContext ctx, ApplicationACLsManager aclsManager) {
     super(ctx);
@@ -88,7 +99,7 @@
 
     setTitle(join("Application ", aid));
 
-    ResponseInfo info = info("Application Overview").
+    info("Application Overview").
       _("User:", app.getUser()).
       _("Name:", app.getName()).
       _("State:", app.getState()).
@@ -99,12 +110,40 @@
       _("Tracking URL:", !app.isTrackingUrlReady() ?
         "#" : app.getTrackingUrlPretty(), app.getTrackingUI()).
       _("Diagnostics:", app.getNote());
-    if (app.amContainerLogsExist()) {
-      info._("AM container logs:", app.getAMContainerLogs(), app.getAMContainerLogs());
-    } else {
-      info._("AM container logs:", "");
+
+    Collection<RMAppAttempt> attempts = rmApp.getAppAttempts().values();
+    String amString =
+        attempts.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";
+
+    DIV<Hamlet> div = html.
+        _(InfoBlock.class).
+        div(_INFO_WRAP);
+    // MRAppMasters Table
+    TABLE<DIV<Hamlet>> table = div.table("#app");
+    table.
+      tr().
+        th(amString).
+      _().
+      tr().
+        th(_TH, "Attempt Number").
+        th(_TH, "Start Time").
+        th(_TH, "Node").
+        th(_TH, "Logs").
+      _();
+
+    boolean odd = false;
+    for (RMAppAttempt attempt : attempts) {
+      AppAttemptInfo attemptInfo = new AppAttemptInfo(attempt);
+      table.tr((odd = !odd) ? _ODD : _EVEN).
+        td(String.valueOf(attemptInfo.getAttemptId())).
+        td(Times.format(attemptInfo.getStartTime())).
+        td().a(".nodelink", url("http://", attemptInfo.getNodeHttpAddress()),
+            attemptInfo.getNodeHttpAddress())._().
+        td().a(".logslink", url(attemptInfo.getLogsLink()), "logs")._().
+      _();
     }
 
-    html._(InfoBlock.class);
+    table._();
+    div._();
   }
 }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
index c7ce9c8..12e77a7 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
@@ -31,6 +31,8 @@
 import javax.xml.bind.JAXBContext;
 
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
@@ -53,7 +55,8 @@
   private final Set<Class> types;
 
   // you have to specify all the dao classes here
-  private final Class[] cTypes = { AppInfo.class, ClusterInfo.class,
+  private final Class[] cTypes = { AppInfo.class, AppAttemptInfo.class,
+      AppAttemptsInfo.class, ClusterInfo.class,
       CapacitySchedulerQueueInfo.class, FifoSchedulerInfo.class,
       SchedulerTypeInfo.class, NodeInfo.class, UserMetricsInfo.class,
       CapacitySchedulerInfo.class, ClusterMetricsInfo.class,
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 857367a..334d3a8 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -45,11 +45,14 @@
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
@@ -385,4 +388,31 @@
     return new AppInfo(app, hasAccess(app, hsr));
   }
 
+  @GET
+  @Path("/apps/{appid}/appattempts")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public AppAttemptsInfo getAppAttempts(@PathParam("appid") String appId) {
+
+    init();
+    if (appId == null || appId.isEmpty()) {
+      throw new NotFoundException("appId, " + appId + ", is empty or null");
+    }
+    ApplicationId id;
+    id = ConverterUtils.toApplicationId(recordFactory, appId);
+    if (id == null) {
+      throw new NotFoundException("appId is null");
+    }
+    RMApp app = rm.getRMContext().getRMApps().get(id);
+    if (app == null) {
+      throw new NotFoundException("app with id: " + appId + " not found");
+    }
+
+    AppAttemptsInfo appAttemptsInfo = new AppAttemptsInfo();
+    for (RMAppAttempt attempt : app.getAppAttempts().values()) {
+      AppAttemptInfo attemptInfo = new AppAttemptInfo(attempt);
+      appAttemptsInfo.add(attemptInfo);
+    }
+
+    return appAttemptsInfo;
+  }
 }
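The hunk above adds a REST endpoint at /ws/v1/cluster/apps/{appid}/appattempts that returns the attempt list as JSON or XML. As a minimal sketch of how a client might exercise it (the ResourceManager address http://rm-host:8088 and the application id below are illustrative assumptions, not values taken from this patch):

----
// Sketch: fetch the new appattempts resource with plain JDK HTTP classes.
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class AppAttemptsClient {
  public static void main(String[] args) throws Exception {
    String appId = args.length > 0 ? args[0] : "application_1339000000000_0001";
    URL url = new URL("http://rm-host:8088/ws/v1/cluster/apps/" + appId + "/appattempts");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
    String line;
    while ((line = in.readLine()) != null) {
      // Expected shape: {"appAttempts":{"appAttempt":[{...}, ...]}}
      System.out.println(line);
    }
    in.close();
    conn.disconnect();
  }
}
----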
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
new file mode 100644
index 0000000..5ad726e
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+@XmlRootElement(name = "appAttempt")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class AppAttemptInfo {
+
+  protected int id;
+  protected long startTime;
+  protected String containerId;
+  protected String nodeHttpAddress;
+  protected String nodeId;
+  protected String logsLink;
+
+  public AppAttemptInfo() {
+  }
+
+  public AppAttemptInfo(RMAppAttempt attempt) {
+    this.startTime = 0;
+    this.containerId = "";
+    this.nodeHttpAddress = "";
+    this.nodeId = "";
+    this.logsLink = "";
+    if (attempt != null) {
+      this.id = attempt.getAppAttemptId().getAttemptId();
+      this.startTime = attempt.getStartTime();
+      Container masterContainer = attempt.getMasterContainer();
+      if (masterContainer != null) {
+        this.containerId = masterContainer.getId().toString();
+        this.nodeHttpAddress = masterContainer.getNodeHttpAddress();
+        this.nodeId = masterContainer.getNodeId().toString();
+        this.logsLink = join("http://", masterContainer.getNodeHttpAddress(),
+            "/node", "/containerlogs/",
+            ConverterUtils.toString(masterContainer.getId()),
+            "/", attempt.getSubmissionContext().getUser());
+      }
+    }
+  }
+
+  public int getAttemptId() {
+    return this.id;
+  }
+
+  public long getStartTime() {
+    return this.startTime;
+  }
+
+  public String getNodeHttpAddress() {
+    return this.nodeHttpAddress;
+  }
+
+  public String getLogsLink() {
+    return this.logsLink;
+  }
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptsInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptsInfo.java
new file mode 100644
index 0000000..07b93f2
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptsInfo.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.ArrayList;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "appAttempts")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class AppAttemptsInfo {
+
+  @XmlElement(name = "appAttempt")
+  protected ArrayList<AppAttemptInfo> attempt = new ArrayList<AppAttemptInfo>();
+
+  public AppAttemptsInfo() {
+  } // JAXB needs this
+
+  public void add(AppAttemptInfo info) {
+    this.attempt.add(info);
+  }
+
+  public ArrayList<AppAttemptInfo> getAttempts() {
+    return this.attempt;
+  }
+
+}
+
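Because AppAttemptInfo and AppAttemptsInfo are plain JAXB-annotated beans, the wire format served by RMWebServices follows directly from the annotations: an appAttempts root wrapping repeated appAttempt entries with id, startTime, containerId, nodeHttpAddress, nodeId and logsLink. A minimal sketch of marshalling them outside the ResourceManager (the attempt added below is an empty placeholder, not real attempt data):

----
// Sketch: marshal the new DAO classes with plain JAXB to inspect the XML shape.
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;

public class AppAttemptsMarshalDemo {
  public static void main(String[] args) throws Exception {
    AppAttemptsInfo attempts = new AppAttemptsInfo();
    // Empty attempt for illustration; the web service builds these from RMAppAttempt.
    attempts.add(new AppAttemptInfo());

    JAXBContext ctx = JAXBContext.newInstance(AppAttemptsInfo.class, AppAttemptInfo.class);
    Marshaller m = ctx.createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    // Prints <appAttempts><appAttempt>...</appAttempt></appAttempts>
    m.marshal(attempts, System.out);
  }
}
----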
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index 7826819..d785255 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -28,14 +28,14 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
@@ -52,8 +52,6 @@
       "test.build.data", "/tmp"), "decommision");
   private File hostFile = new File(TEMP_DIR + File.separator + "hostFile.txt");
   private MockRM rm;
-  private static final RecordFactory recordFactory = RecordFactoryProvider
-      .getRecordFactory(null);
 
   /**
    * decommissioning using a include hosts file
@@ -61,9 +59,9 @@
   @Test
   public void testDecommissionWithIncludeHosts() throws Exception {
 
-    writeToHostsFile("host1", "host2");
+    writeToHostsFile("localhost", "host1", "host2");
     Configuration conf = new Configuration();
-    conf.set("yarn.resourcemanager.nodes.include-path", hostFile
+    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile
         .getAbsolutePath());
 
     rm = new MockRM(conf);
@@ -71,17 +69,22 @@
 
     MockNM nm1 = rm.registerNode("host1:1234", 5120);
     MockNM nm2 = rm.registerNode("host2:5678", 10240);
+    MockNM nm3 = rm.registerNode("localhost:4433", 1024);
     
     ClusterMetrics metrics = ClusterMetrics.getMetrics();
     assert(metrics != null);
-    int initialMetricCount = metrics.getNumDecommisionedNMs();
+    int metricCount = metrics.getNumDecommisionedNMs();
 
     HeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
     Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
     nodeHeartbeat = nm2.nodeHeartbeat(true);
     Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
+    nodeHeartbeat = nm3.nodeHeartbeat(true);
+    Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
 
-    writeToHostsFile("host1");
+    // To test that IPs also work
+    String ip = NetUtils.normalizeHostName("localhost");
+    writeToHostsFile("host1", ip);
 
     rm.getNodesListManager().refreshNodes();
 
@@ -94,7 +97,12 @@
     Assert.assertTrue("Node is not decommisioned.", NodeAction.SHUTDOWN
         .equals(nodeHeartbeat.getNodeAction()));
 
-    checkDecommissionedNMCount(rm, ++initialMetricCount);
+    checkDecommissionedNMCount(rm, ++metricCount);
+
+    nodeHeartbeat = nm3.nodeHeartbeat(true);
+    Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
+    Assert.assertEquals(metricCount, ClusterMetrics.getMetrics()
+      .getNumDecommisionedNMs());
   }
 
   /**
@@ -103,7 +111,7 @@
   @Test
   public void testDecommissionWithExcludeHosts() throws Exception {
     Configuration conf = new Configuration();
-    conf.set("yarn.resourcemanager.nodes.exclude-path", hostFile
+    conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, hostFile
         .getAbsolutePath());
 
     writeToHostsFile("");
@@ -112,16 +120,18 @@
 
     MockNM nm1 = rm.registerNode("host1:1234", 5120);
     MockNM nm2 = rm.registerNode("host2:5678", 10240);
+    MockNM nm3 = rm.registerNode("localhost:4433", 1024);
 
-    int initialMetricCount = ClusterMetrics.getMetrics()
-        .getNumDecommisionedNMs();
+    int metricCount = ClusterMetrics.getMetrics().getNumDecommisionedNMs();
 
     HeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
     Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
     nodeHeartbeat = nm2.nodeHeartbeat(true);
     Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
 
-    writeToHostsFile("host2");
+    // To test that IPs also work
+    String ip = NetUtils.normalizeHostName("localhost");
+    writeToHostsFile("host2", ip);
 
     rm.getNodesListManager().refreshNodes();
 
@@ -130,14 +140,19 @@
     nodeHeartbeat = nm2.nodeHeartbeat(true);
     Assert.assertTrue("The decommisioned metrics are not updated",
         NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));
-    checkDecommissionedNMCount(rm, ++initialMetricCount);
+    checkDecommissionedNMCount(rm, ++metricCount);
+
+    nodeHeartbeat = nm3.nodeHeartbeat(true);
+    Assert.assertTrue("The decommisioned metrics are not updated",
+        NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));
+    checkDecommissionedNMCount(rm, ++metricCount);
   }
   
   @Test
   public void testNodeRegistrationFailure() throws Exception {
     writeToHostsFile("host1");
     Configuration conf = new Configuration();
-    conf.set("yarn.resourcemanager.nodes.include-path", hostFile
+    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile
         .getAbsolutePath());
     rm = new MockRM(conf);
     rm.start();
@@ -191,7 +206,7 @@
   @Test
   public void testUnhealthyNodeStatus() throws Exception {
     Configuration conf = new Configuration();
-    conf.set("yarn.resourcemanager.nodes.exclude-path", hostFile
+    conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, hostFile
         .getAbsolutePath());
 
     rm = new MockRM(conf);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index db203d2..81aba39 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -19,6 +19,7 @@
 
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.yarn.MockApps;
@@ -189,6 +190,10 @@
       throw new UnsupportedOperationException("Not supported yet.");
     }
     @Override
+    public Map<ApplicationAttemptId, RMAppAttempt> getAppAttempts() {
+      throw new UnsupportedOperationException("Not supported yet.");
+    }
+    @Override
     public ApplicationStore getApplicationStore() {
       throw new UnsupportedOperationException("Not supported yet.");
     }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
index 983b29c..e5ea2b86 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
@@ -20,6 +20,9 @@
 
 import java.util.Collection;
 
+import java.util.LinkedHashMap;
+import java.util.Map;
+
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -113,6 +116,14 @@
   }
 
   @Override
+  public Map<ApplicationAttemptId, RMAppAttempt> getAppAttempts() {
+    Map<ApplicationAttemptId, RMAppAttempt> attempts =
+      new LinkedHashMap<ApplicationAttemptId, RMAppAttempt>();
+    attempts.put(attempt.getAppAttemptId(), attempt);
+    return attempts;
+  }
+
+  @Override
   public RMAppAttempt getCurrentAppAttempt() {
     return attempt;
   }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
index 05f5fdb..5ae32f6 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
@@ -32,9 +32,12 @@
 import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.junit.Test;
 
 public class TestQueueMetrics {
@@ -52,7 +55,7 @@
     MetricsSource queueSource= queueSource(ms, queueName);
     AppSchedulingInfo app = mockApp(user);
 
-    metrics.submitApp(user);
+    metrics.submitApp(user, 1);
     MetricsSource userSource = userSource(ms, queueName, user);
     checkApps(queueSource, 1, 1, 0, 0, 0, 0);
 
@@ -75,6 +78,53 @@
     checkApps(queueSource, 1, 0, 0, 1, 0, 0);
     assertNull(userSource);
   }
+  
+  @Test
+  public void testQueueAppMetricsForMultipleFailures() {
+    String queueName = "single";
+    String user = "alice";
+
+    QueueMetrics metrics = QueueMetrics.forQueue(ms, queueName, null, false,
+        new Configuration());
+    MetricsSource queueSource = queueSource(ms, queueName);
+    AppSchedulingInfo app = mockApp(user);
+
+    metrics.submitApp(user, 1);
+    MetricsSource userSource = userSource(ms, queueName, user);
+    checkApps(queueSource, 1, 1, 0, 0, 0, 0);
+
+    metrics.incrAppsRunning(app, user);
+    checkApps(queueSource, 1, 0, 1, 0, 0, 0);
+
+    metrics.finishApp(app, RMAppAttemptState.FAILED);
+    checkApps(queueSource, 1, 0, 0, 0, 1, 0);
+
+    // As the application has failed, framework retries the same application
+    // based on configuration
+    metrics.submitApp(user, 2);
+    checkApps(queueSource, 1, 1, 0, 0, 0, 0);
+
+    metrics.incrAppsRunning(app, user);
+    checkApps(queueSource, 1, 0, 1, 0, 0, 0);
+
+    // Suppose say application has failed this time as well.
+    metrics.finishApp(app, RMAppAttemptState.FAILED);
+    checkApps(queueSource, 1, 0, 0, 0, 1, 0);
+
+    // As the application has failed, framework retries the same application
+    // based on configuration
+    metrics.submitApp(user, 3);
+    checkApps(queueSource, 1, 1, 0, 0, 0, 0);
+
+    metrics.incrAppsRunning(app, user);
+    checkApps(queueSource, 1, 0, 1, 0, 0, 0);
+
+    // Suppose say application has finished.
+    metrics.finishApp(app, RMAppAttemptState.FINISHED);
+    checkApps(queueSource, 1, 0, 0, 1, 0, 0);
+
+    assertNull(userSource);
+  }
 
   @Test public void testSingleQueueWithUserMetrics() {
     String queueName = "single2";
@@ -85,7 +135,7 @@
     MetricsSource queueSource = queueSource(ms, queueName);
     AppSchedulingInfo app = mockApp(user);
 
-    metrics.submitApp(user);
+    metrics.submitApp(user, 1);
     MetricsSource userSource = userSource(ms, queueName, user);
 
     checkApps(queueSource, 1, 1, 0, 0, 0, 0);
@@ -131,7 +181,7 @@
     MetricsSource queueSource = queueSource(ms, leafQueueName);
     AppSchedulingInfo app = mockApp(user);
 
-    metrics.submitApp(user);
+    metrics.submitApp(user, 1);
     MetricsSource userSource = userSource(ms, leafQueueName, user);
     MetricsSource parentUserSource = userSource(ms, parentQueueName, user);
 
@@ -184,7 +234,7 @@
     assertGauge("AppsPending", pending, rb);
     assertGauge("AppsRunning", running, rb);
     assertCounter("AppsCompleted", completed, rb);
-    assertCounter("AppsFailed", failed, rb);
+    assertGauge("AppsFailed", failed, rb);
     assertCounter("AppsKilled", killed, rb);
   }
 
@@ -207,6 +257,9 @@
   private static AppSchedulingInfo mockApp(String user) {
     AppSchedulingInfo app = mock(AppSchedulingInfo.class);
     when(app.getUser()).thenReturn(user);
+    ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
+    ApplicationAttemptId id = BuilderUtils.newApplicationAttemptId(appId, 1);
+    when(app.getApplicationAttemptId()).thenReturn(id);
     return app;
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index caec362..40d44aa 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -85,6 +85,7 @@
 
   @After
   public void tearDown() throws Exception {
+    resourceManager.stop();
   }
   
   private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 8be9b20..7571a0f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -55,6 +55,7 @@
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
@@ -63,6 +64,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -89,7 +91,8 @@
 
   @Before
   public void setUp() throws Exception {
-    cs = new CapacityScheduler();
+    CapacityScheduler spyCs = new CapacityScheduler();
+    cs = spy(spyCs);
     rmContext = TestUtils.getMockRMContext();
     
     csConf = 
@@ -309,6 +312,14 @@
     SchedulerApp app_0 = new SchedulerApp(appAttemptId_0, user_0, a, null,
         rmContext, null);
     a.submitApplication(app_0, user_0, B);
+    
+    when(cs.getApplication(appAttemptId_0)).thenReturn(app_0);
+    AppRemovedSchedulerEvent event = new AppRemovedSchedulerEvent(
+        appAttemptId_0, RMAppAttemptState.FAILED);
+    cs.handle(event);
+    
+    assertEquals(0, a.getMetrics().getAppsPending());
+    assertEquals(1, a.getMetrics().getAppsFailed());
 
     // Attempt the same application again
     final ApplicationAttemptId appAttemptId_1 = TestUtils
@@ -319,6 +330,16 @@
 
     assertEquals(1, a.getMetrics().getAppsSubmitted());
     assertEquals(1, a.getMetrics().getAppsPending());
+    
+    when(cs.getApplication(appAttemptId_1)).thenReturn(app_0);
+    event = new AppRemovedSchedulerEvent(appAttemptId_0,
+        RMAppAttemptState.FINISHED);
+    cs.handle(event);
+    
+    assertEquals(1, a.getMetrics().getAppsSubmitted());
+    assertEquals(0, a.getMetrics().getAppsPending());
+    assertEquals(0, a.getMetrics().getAppsFailed());
+    assertEquals(1, a.getMetrics().getAppsCompleted());
 
     QueueMetrics userMetrics = a.getMetrics().getUserMetrics(user_0);
     assertEquals(1, userMetrics.getAppsSubmitted());
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index 123d235..e7c46dc 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -64,6 +64,7 @@
 
   @After
   public void tearDown() throws Exception {
+    resourceManager.stop();
   }
   
   private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 3a30b92..427dcf8 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -23,6 +23,7 @@
 import static org.junit.Assert.fail;
 
 import java.io.StringReader;
+import java.util.Collection;
 
 import javax.ws.rs.core.MediaType;
 import javax.xml.parsers.DocumentBuilder;
@@ -31,13 +32,18 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppFailedAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
@@ -73,7 +79,9 @@
       bind(JAXBContextResolver.class);
       bind(RMWebServices.class);
       bind(GenericExceptionHandler.class);
-      rm = new MockRM(new Configuration());
+      Configuration conf = new Configuration();
+      conf.setInt(YarnConfiguration.RM_AM_MAX_RETRIES, 2);
+      rm = new MockRM(conf);
       bind(ResourceManager.class).toInstance(rm);
       bind(RMContext.class).toInstance(rm.getRMContext());
       bind(ApplicationACLsManager.class).toInstance(
@@ -835,4 +843,234 @@
         amContainerLogs.endsWith("/" + app.getUser()));
   }
 
+  @Test
+  public void testAppAttempts() throws JSONException, Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    RMApp app1 = rm.submitApp(1024, "testwordcount", "user1");
+    amNodeManager.nodeHeartbeat(true);
+    testAppAttemptsHelper(app1.getApplicationId().toString(), app1,
+        MediaType.APPLICATION_JSON);
+    rm.stop();
+  }
+
+  @Test
+  public void testMultipleAppAttempts() throws JSONException, Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    RMApp app1 = rm.submitApp(1024, "testwordcount", "user1");
+    amNodeManager.nodeHeartbeat(true);
+    int maxRetries = rm.getConfig().getInt(YarnConfiguration.RM_AM_MAX_RETRIES,
+        YarnConfiguration.DEFAULT_RM_AM_MAX_RETRIES);
+    int retriesLeft = maxRetries;
+    while (--retriesLeft > 0) {
+      RMAppEvent event =
+          new RMAppFailedAttemptEvent(app1.getApplicationId(),
+              RMAppEventType.ATTEMPT_FAILED, "");
+      app1.handle(event);
+      rm.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
+      amNodeManager.nodeHeartbeat(true);
+    }
+    assertEquals("incorrect number of attempts", maxRetries,
+        app1.getAppAttempts().values().size());
+    testAppAttemptsHelper(app1.getApplicationId().toString(), app1,
+        MediaType.APPLICATION_JSON);
+    rm.stop();
+  }
+
+  @Test
+  public void testAppAttemptsSlash() throws JSONException, Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    RMApp app1 = rm.submitApp(1024);
+    amNodeManager.nodeHeartbeat(true);
+    testAppAttemptsHelper(app1.getApplicationId().toString() + "/", app1,
+        MediaType.APPLICATION_JSON);
+    rm.stop();
+  }
+
+  @Test
+  public void testAppAttemptsDefault() throws JSONException, Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    RMApp app1 = rm.submitApp(1024);
+    amNodeManager.nodeHeartbeat(true);
+    testAppAttemptsHelper(app1.getApplicationId().toString() + "/", app1, "");
+    rm.stop();
+  }
+
+  @Test
+  public void testInvalidAppAttempts() throws JSONException, Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    rm.submitApp(1024);
+    amNodeManager.nodeHeartbeat(true);
+    WebResource r = resource();
+
+    try {
+      r.path("ws").path("v1").path("cluster").path("apps")
+          .path("application_invalid_12").accept(MediaType.APPLICATION_JSON)
+          .get(JSONObject.class);
+      fail("should have thrown exception on invalid appid");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+
+      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject msg = response.getEntity(JSONObject.class);
+      JSONObject exception = msg.getJSONObject("RemoteException");
+      assertEquals("incorrect number of elements", 3, exception.length());
+      String message = exception.getString("message");
+      String type = exception.getString("exception");
+      String classname = exception.getString("javaClassName");
+      WebServicesTestUtils.checkStringMatch("exception message",
+          "For input string: \"invalid\"", message);
+      WebServicesTestUtils.checkStringMatch("exception type",
+          "NumberFormatException", type);
+      WebServicesTestUtils.checkStringMatch("exception classname",
+          "java.lang.NumberFormatException", classname);
+
+    } finally {
+      rm.stop();
+    }
+  }
+
+  @Test
+  public void testNonexistAppAttempts() throws JSONException, Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    rm.submitApp(1024, "testwordcount", "user1");
+    amNodeManager.nodeHeartbeat(true);
+    WebResource r = resource();
+
+    try {
+      r.path("ws").path("v1").path("cluster").path("apps")
+          .path("application_00000_0099").accept(MediaType.APPLICATION_JSON)
+          .get(JSONObject.class);
+      fail("should have thrown exception on invalid appid");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+
+      JSONObject msg = response.getEntity(JSONObject.class);
+      JSONObject exception = msg.getJSONObject("RemoteException");
+      assertEquals("incorrect number of elements", 3, exception.length());
+      String message = exception.getString("message");
+      String type = exception.getString("exception");
+      String classname = exception.getString("javaClassName");
+      WebServicesTestUtils.checkStringMatch("exception message",
+          "java.lang.Exception: app with id: application_00000_0099 not found",
+          message);
+      WebServicesTestUtils.checkStringMatch("exception type",
+          "NotFoundException", type);
+      WebServicesTestUtils.checkStringMatch("exception classname",
+          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
+    } finally {
+      rm.stop();
+    }
+  }
+
+  public void testAppAttemptsHelper(String path, RMApp app, String media)
+      throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("apps").path(path).path("appattempts").accept(media)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject jsonAppAttempts = json.getJSONObject("appAttempts");
+    assertEquals("incorrect number of elements", 1, jsonAppAttempts.length());
+    JSONArray jsonArray = jsonAppAttempts.getJSONArray("appAttempt");
+
+    Collection<RMAppAttempt> attempts = app.getAppAttempts().values();
+    assertEquals("incorrect number of elements", attempts.size(),
+        jsonArray.length());
+
+    // Verify these parallel arrays are the same
+    int i = 0;
+    for (RMAppAttempt attempt : attempts) {
+      verifyAppAttemptsInfo(jsonArray.getJSONObject(i), attempt);
+      ++i;
+    }
+  }
+
+  @Test
+  public void testAppAttemptsXML() throws JSONException, Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    RMApp app1 = rm.submitApp(1024, "testwordcount", "user1");
+    amNodeManager.nodeHeartbeat(true);
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("apps").path(app1.getApplicationId().toString())
+        .path("appattempts").accept(MediaType.APPLICATION_XML)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+    String xml = response.getEntity(String.class);
+
+    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+    DocumentBuilder db = dbf.newDocumentBuilder();
+    InputSource is = new InputSource();
+    is.setCharacterStream(new StringReader(xml));
+    Document dom = db.parse(is);
+    NodeList nodes = dom.getElementsByTagName("appAttempts");
+    assertEquals("incorrect number of elements", 1, nodes.getLength());
+    NodeList attempt = dom.getElementsByTagName("appAttempt");
+    assertEquals("incorrect number of elements", 1, attempt.getLength());
+    verifyAppAttemptsXML(attempt, app1.getCurrentAppAttempt());
+    rm.stop();
+  }
+
+  public void verifyAppAttemptsXML(NodeList nodes, RMAppAttempt appAttempt)
+      throws JSONException, Exception {
+
+    for (int i = 0; i < nodes.getLength(); i++) {
+      Element element = (Element) nodes.item(i);
+
+      verifyAppAttemptInfoGeneric(appAttempt,
+          WebServicesTestUtils.getXmlInt(element, "id"),
+          WebServicesTestUtils.getXmlLong(element, "startTime"),
+          WebServicesTestUtils.getXmlString(element, "containerId"),
+          WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"),
+          WebServicesTestUtils.getXmlString(element, "nodeId"),
+          WebServicesTestUtils.getXmlString(element, "logsLink"));
+    }
+  }
+
+  public void verifyAppAttemptsInfo(JSONObject info, RMAppAttempt appAttempt)
+      throws JSONException, Exception {
+
+    assertEquals("incorrect number of elements", 6, info.length());
+
+    verifyAppAttemptInfoGeneric(appAttempt, info.getInt("id"),
+        info.getLong("startTime"), info.getString("containerId"),
+        info.getString("nodeHttpAddress"), info.getString("nodeId"),
+        info.getString("logsLink"));
+  }
+
+  public void verifyAppAttemptInfoGeneric(RMAppAttempt appAttempt, int id,
+      long startTime, String containerId, String nodeHttpAddress, String nodeId,
+      String logsLink)
+          throws JSONException, Exception {
+
+    assertEquals("id doesn't match", appAttempt.getAppAttemptId()
+        .getAttemptId(), id);
+    assertEquals("startedTime doesn't match", appAttempt.getStartTime(),
+        startTime);
+    WebServicesTestUtils.checkStringMatch("containerId", appAttempt
+        .getMasterContainer().getId().toString(), containerId);
+    WebServicesTestUtils.checkStringMatch("nodeHttpAddress", appAttempt
+        .getMasterContainer().getNodeHttpAddress(), nodeHttpAddress);
+    WebServicesTestUtils.checkStringMatch("nodeId", appAttempt
+        .getMasterContainer().getNodeId().toString(), nodeId);
+    assertTrue("logsLink doesn't match",
+        logsLink.startsWith("http://"));
+    assertTrue("logsLink doesn't contain user info",
+        logsLink.endsWith("/" + appAttempt.getSubmissionContext().getUser()));
+  }
+
 }
+
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
index 87c5d7e..06e9fcc 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
@@ -11,7 +11,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project xmlns="http://maven.apache.org/POM/4.0.0">
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
index 7be3676c..ac7154c 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml
index 2de4e33..9ba8a9a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml
index 5cc90db..aef4dd1 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
index 6301eb2..3b075f7 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
@@ -85,6 +85,11 @@
     At the very least you should specify the <<<JAVA_HOME>>> so that it is 

     correctly defined on each remote node.

 

+    In most cases you should also specify <<<HADOOP_PID_DIR>>> and 

+    <<<HADOOP_SECURE_DN_PID_DIR>>> to point to directories that can only be

+    written to by the users that are going to run the hadoop daemons.  

+    Otherwise there is the potential for a symlink attack.

+

     Administrators can configure individual daemons using the configuration 

     options shown below in the table:

 

diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/Federation.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/Federation.apt.vm
index d191b5e..c7c8770 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/Federation.apt.vm
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/Federation.apt.vm
@@ -131,7 +131,7 @@
 ** Configuration:
 
   <<Step 1>>: Add the following parameters to your configuration:
-  <<<dfs.federation.nameservices>>>: Configure with list of comma separated 
+  <<<dfs.nameservices>>>: Configure with the list of comma-separated 
   NameServiceIDs. This will be used by Datanodes to determine all the 
   Namenodes in the cluster.
   
@@ -164,7 +164,7 @@
 ----
 <configuration>
   <property>
-    <name>dfs.federation.nameservices</name>
+    <name>dfs.nameservices</name>
     <value>ns1,ns2</value>
   </property>
   <property>
@@ -233,8 +233,7 @@
 
   Follow the following steps:
 
-  * Add configuration parameter <<<dfs.federation.nameservices>>> to 
-    the configuration.
+  * Add configuration parameter <<<dfs.nameservices>>> to the configuration.
 
   * Update the configuration with NameServiceID suffix. Configuration 
     key names have changed post release 0.20. You must use new configuration 
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm
index 94fb854..67d4232 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm
@@ -33,7 +33,7 @@
 
 * {Background}
 
-  Prior to Hadoop 0.23.2, the NameNode was a single point of failure (SPOF) in
+  Prior to Hadoop 2.0.0, the NameNode was a single point of failure (SPOF) in
   an HDFS cluster. Each cluster had a single NameNode, and if that machine or
   process became unavailable, the cluster as a whole would be unavailable
   until the NameNode was either restarted or brought up on a separate machine.
@@ -90,12 +90,6 @@
   prevents it from making any further edits to the namespace, allowing the new
   Active to safely proceed with failover.
 
-  <<Note:>> Currently, only manual failover is supported. This means the HA
-  NameNodes are incapable of automatically detecting a failure of the Active
-  NameNode, and instead rely on the operator to manually initiate a failover.
-  Automatic failure detection and initiation of a failover will be implemented in
-  future versions.
-
 * {Hardware resources}
 
   In order to deploy an HA cluster, you should prepare the following:
@@ -147,12 +141,12 @@
   <<hdfs-site.xml>> configuration file.
 
   The order in which you set these configurations is unimportant, but the values
-  you choose for <<dfs.federation.nameservices>> and
+  you choose for <<dfs.nameservices>> and
   <<dfs.ha.namenodes.[nameservice ID]>> will determine the keys of those that
   follow. Thus, you should decide on these values before setting the rest of the
   configuration options.
 
-  * <<dfs.federation.nameservices>> - the logical name for this new nameservice
+  * <<dfs.nameservices>> - the logical name for this new nameservice
 
     Choose a logical name for this nameservice, for example "mycluster", and use
     this logical name for the value of this config option. The name you choose is
@@ -165,7 +159,7 @@
 
 ----
 <property>
-  <name>dfs.federation.nameservices</name>
+  <name>dfs.nameservices</name>
   <value>mycluster</value>
 </property>
 ----
@@ -459,3 +453,263 @@
 
     <<Note:>> This is not yet implemented, and at present will always return
     success, unless the given NameNode is completely down.
+
+* {Automatic Failover}
+
+** Introduction
+
+  The above sections describe how to configure manual failover. In that mode,
+  the system will not automatically trigger a failover from the active to the
+  standby NameNode, even if the active node has failed. This section describes
+  how to configure and deploy automatic failover.
+
+** Components
+
+  Automatic failover adds two new components to an HDFS deployment: a ZooKeeper
+  quorum, and the ZKFailoverController process (abbreviated as ZKFC).
+
+  Apache ZooKeeper is a highly available service for maintaining small amounts
+  of coordination data, notifying clients of changes in that data, and
+  monitoring clients for failures. The implementation of automatic HDFS failover
+  relies on ZooKeeper for the following things:
+  
+    * <<Failure detection>> - each of the NameNode machines in the cluster
+    maintains a persistent session in ZooKeeper. If the machine crashes, the
+    ZooKeeper session will expire, notifying the other NameNode that a failover
+    should be triggered.
+
+    * <<Active NameNode election>> - ZooKeeper provides a simple mechanism to
+    exclusively elect a node as active. If the current active NameNode crashes,
+    another node may take a special exclusive lock in ZooKeeper indicating that
+    it should become the next active.
+
+  The ZKFailoverController (ZKFC) is a new component: a ZooKeeper client that
+  also monitors and manages the state of the NameNode. Each of the machines
+  that runs a NameNode also runs a ZKFC, and that ZKFC is responsible for:
+
+    * <<Health monitoring>> - the ZKFC pings its local NameNode on a periodic
+    basis with a health-check command. So long as the NameNode responds in a
+    timely fashion with a healthy status, the ZKFC considers the node
+    healthy. If the node has crashed, frozen, or otherwise entered an unhealthy
+    state, the health monitor will mark it as unhealthy.
+
+    * <<ZooKeeper session management>> - when the local NameNode is healthy, the
+    ZKFC holds a session open in ZooKeeper. If the local NameNode is active, it
+    also holds a special "lock" znode. This lock uses ZooKeeper's support for
+    "ephemeral" nodes; if the session expires, the lock node will be
+    automatically deleted.
+
+    * <<ZooKeeper-based election>> - if the local NameNode is healthy, and the
+    ZKFC sees that no other node currently holds the lock znode, it will itself
+    try to acquire the lock. If it succeeds, then it has "won the election", and
+    is responsible for running a failover to make its local NameNode active. The
+    failover process is similar to the manual failover described above: first,
+    the previous active is fenced if necessary, and then the local NameNode
+    transitions to active state.
+
+  For more details on the design of automatic failover, refer to the design
+  document attached to HDFS-2185 on the Apache HDFS JIRA.
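
  The election described above boils down to racing to create an ephemeral
  znode: whichever session creates it first holds the "lock" until that session
  expires. The following is only an illustrative sketch of that idea with the
  ZooKeeper Java client; the connect string, znode path and node name are
  assumptions, and the real ZKFC uses its own lock path and recovery logic.

----
// Sketch: win-or-lose an election by creating an ephemeral lock znode.
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class ActiveLockSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the ZooKeeper quorum (address is an illustrative assumption).
    ZooKeeper zk = new ZooKeeper("zk1.example.com:2181", 5000, new Watcher() {
      public void process(WatchedEvent event) { /* session events ignored here */ }
    });
    try {
      // Ensure the parent znode exists (persistent, created once).
      zk.create("/election", new byte[0],
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    } catch (KeeperException.NodeExistsException ignored) {
      // Parent already present.
    }
    try {
      // The ephemeral znode disappears when the session expires, releasing the lock.
      zk.create("/election/active-lock", "nn1".getBytes("UTF-8"),
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      System.out.println("Won the election: this node would transition to active");
    } catch (KeeperException.NodeExistsException e) {
      System.out.println("Another node already holds the lock: stay standby");
    }
    zk.close();
  }
}
----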
+
+** Deploying ZooKeeper
+
+  In a typical deployment, ZooKeeper daemons are configured to run on three or
+  five nodes. Since ZooKeeper itself has light resource requirements, it is
+  acceptable to collocate the ZooKeeper nodes on the same hardware as the HDFS
+  NameNode and Standby Node. Many operators choose to deploy the third ZooKeeper
+  process on the same node as the YARN ResourceManager. It is advisable to
+  configure the ZooKeeper nodes to store their data on separate disk drives from
+  the HDFS metadata for best performance and isolation.
+
+  The setup of ZooKeeper is out of scope for this document. We will assume that
+  you have set up a ZooKeeper cluster running on three or more nodes, and have
+  verified its correct operation by connecting using the ZK CLI.
+
+** Before you begin
+
+  Before you begin configuring automatic failover, you should shut down your
+  cluster. It is not currently possible to transition from a manual failover
+  setup to an automatic failover setup while the cluster is running.
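+
+  For example, if you manage HDFS with the provided scripts, you might stop the
+  cluster like this before changing the configuration:
+
+----
+$ stop-dfs.sh
+----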
+
+** Configuring automatic failover
+
+  The configuration of automatic failover requires the addition of two new
+  parameters to your configuration. In your <<<hdfs-site.xml>>> file, add:
+
+----
+ <property>
+   <name>dfs.ha.automatic-failover.enabled</name>
+   <value>true</value>
+ </property>
+----
+
+  This specifies that the cluster should be set up for automatic failover.
+  In your <<<core-site.xml>>> file, add:
+
+----
+ <property>
+   <name>ha.zookeeper.quorum</name>
+   <value>zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181</value>
+ </property>
+----
+
+  This lists the host-port pairs running the ZooKeeper service.
+
+  As with the parameters described earlier in the document, these settings may
+  be configured on a per-nameservice basis by suffixing the configuration key
+  with the nameservice ID. For example, in a cluster with federation enabled,
+  you can explicitly enable automatic failover for only one of the nameservices
+  by setting <<<dfs.ha.automatic-failover.enabled.my-nameservice-id>>>.
+
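+  For example, the per-nameservice form of the setting might look like the
+  following in your <<<hdfs-site.xml>>> file (the nameservice ID is
+  illustrative):
+
+----
+ <property>
+   <name>dfs.ha.automatic-failover.enabled.my-nameservice-id</name>
+   <value>true</value>
+ </property>
+----
+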
+  There are also several other configuration parameters which may be set to
+  control the behavior of automatic failover; however, they are not necessary
+  for most installations. Please refer to the configuration key specific
+  documentation for details.
+
+** Initializing HA state in ZooKeeper
+
+  After the configuration keys have been added, the next step is to initialize
+  required state in ZooKeeper. You can do so by running the following command
+  from one of the NameNode hosts.
+
+----
+$ hdfs zkfc -formatZK
+----
+
+  This will create a znode in ZooKeeper inside of which the automatic failover
+  system stores its data.
+
+** Starting the cluster with <<<start-dfs.sh>>>
+
+  Since automatic failover has been enabled in the configuration, the
+  <<<start-dfs.sh>>> script will now automatically start a ZKFC daemon on any
+  machine that runs a NameNode. When the ZKFCs start, they will automatically
+  select one of the NameNodes to become active.
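+
+  For example, running the script as usual will bring up the NameNodes,
+  DataNodes, and ZKFC daemons together:
+
+----
+$ start-dfs.sh
+----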
+
+** Starting the cluster manually
+
+  If you manually manage the services on your cluster, you will need to manually
+  start the <<<zkfc>>> daemon on each of the machines that runs a NameNode. You
+  can start the daemon by running:
+
+----
+$ hadoop-daemon.sh start zkfc
+----
+
+** Securing access to ZooKeeper
+
+  If you are running a secure cluster, you will likely want to ensure that the
+  information stored in ZooKeeper is also secured. This prevents malicious
+  clients from modifying the metadata in ZooKeeper or potentially triggering a
+  false failover.
+
+  In order to secure the information in ZooKeeper, first add the following to
+  your <<<core-site.xml>>> file:
+
+----
+ <property>
+   <name>ha.zookeeper.auth</name>
+   <value>@/path/to/zk-auth.txt</value>
+ </property>
+ <property>
+   <name>ha.zookeeper.acl</name>
+   <value>@/path/to/zk-acl.txt</value>
+ </property>
+----
+
+  Please note the '@' character in these values -- this specifies that the
+  configurations are not inline, but rather point to a file on disk.
+
+  The first configured file specifies a list of ZooKeeper authentications, in
+  the same format as used by the ZK CLI. For example, you may specify something
+  like:
+
+----
+digest:hdfs-zkfcs:mypassword
+----
+  ...where <<<hdfs-zkfcs>>> is a unique username for ZooKeeper, and
+  <<<mypassword>>> is some unique string used as a password.
+
+  Next, generate a ZooKeeper ACL that corresponds to this authentication, using
+  a command like the following:
+
+----
+$ java -cp $ZK_HOME/lib/*:$ZK_HOME/zookeeper-3.4.2.jar org.apache.zookeeper.server.auth.DigestAuthenticationProvider hdfs-zkfcs:mypassword
+output: hdfs-zkfcs:mypassword->hdfs-zkfcs:P/OQvnYyU/nF/mGYvB/xurX8dYs=
+----
+
+  Copy and paste the section of this output after the '->' string into the file
+  <<<zk-acl.txt>>>, prefixed by the string "<<<digest:>>>". For example:
+
+----
+digest:hdfs-zkfcs:vlUvLnd8MlacsE80rDuu6ONESbM=:rwcda
+----
+
+  In order for these ACLs to take effect, you should then rerun the
+  <<<zkfc -formatZK>>> command as described above.
+
+  After doing so, you may verify the ACLs from the ZK CLI as follows:
+
+----
+[zk: localhost:2181(CONNECTED) 1] getAcl /hadoop-ha
+'digest,'hdfs-zkfcs:vlUvLnd8MlacsE80rDuu6ONESbM=
+: cdrwa
+----
+
+** Verifying automatic failover
+
+  Once automatic failover has been set up, you should test its operation. To do
+  so, first locate the active NameNode. You can tell which node is active by
+  visiting the NameNode web interfaces -- each node reports its HA state at the
+  top of the page.
+
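+  You may also check the state from the command line with the <<<hdfs haadmin>>>
+  command described earlier in this document. For example, assuming the
+  illustrative NameNode IDs <<<nn1>>> and <<<nn2>>>:
+
+----
+$ hdfs haadmin -getServiceState nn1
+active
+$ hdfs haadmin -getServiceState nn2
+standby
+----
+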
+  Once you have located your active NameNode, you may cause a failure on that
+  node.  For example, you can use <<<kill -9 <pid of NN>>>> to simulate a JVM
+  crash. Or, you could power cycle the machine or unplug its network interface
+  to simulate a different kind of outage.  After triggering the outage you wish
+  to test, the other NameNode should automatically become active within several
+  seconds. The amount of time required to detect a failure and trigger a
+  failover depends on the configuration of
+  <<<ha.zookeeper.session-timeout.ms>>>, but defaults to 5 seconds.
+
+  If the test does not succeed, you may have a misconfiguration. Check the logs
+  for the <<<zkfc>>> daemons as well as the NameNode daemons in order to further
+  diagnose the issue.
+
+
+* Automatic Failover FAQ
+
+  * <<Is it important that I start the ZKFC and NameNode daemons in any
+    particular order?>>
+
+  No. On any given node you may start the ZKFC before or after its corresponding
+  NameNode.
+
+  * <<What additional monitoring should I put in place?>>
+
+  You should add monitoring on each host that runs a NameNode to ensure that the
+  ZKFC remains running. During some types of ZooKeeper failures, for example, the
+  ZKFC may exit unexpectedly; it should then be restarted to ensure that the
+  system is ready for automatic failover.
+
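+  For example, a minimal liveness check might simply look for the ZKFC process
+  in the output of <<<jps>>> (the process name shown corresponds to the ZKFC's
+  main class):
+
+----
+$ jps | grep DFSZKFailoverController
+----
+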
+  Additionally, you should monitor each of the servers in the ZooKeeper
+  quorum. If ZooKeeper crashes, then automatic failover will not function.
+
+  * <<What happens if ZooKeeper goes down?>>
+
+  If the ZooKeeper cluster crashes, no automatic failovers will be triggered.
+  However, HDFS will continue to run without any impact. When ZooKeeper is
+  restarted, HDFS will reconnect with no issues.
+
+  * <<Can I designate one of my NameNodes as primary/preferred?>>
+
+  No. Currently, this is not supported. Whichever NameNode is started first will
+  become active. You may choose to start the cluster in a specific order such
+  that your preferred node starts first.
+
+  * <<How can I initiate a manual failover when automatic failover is
+    configured?>>
+
+  Even if automatic failover is configured, you may initiate a manual failover
+  using the same <<<hdfs haadmin>>> command. It will perform a coordinated
+  failover.
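+
+  For example, to fail over from a currently-active NameNode <<<nn1>>> to the
+  standby <<<nn2>>> (the IDs are illustrative), you might run:
+
+----
+$ hdfs haadmin -failover nn1 nn2
+----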
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
index 977bd52..36600b8 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
@@ -1236,6 +1236,130 @@
 </app>
 +---+
 
+* Cluster Application Attempts API
+
+  With the application attempts API, you can obtain a collection of resources, each of which represents an application attempt.  When you run a GET operation on this resource, you obtain a collection of App Attempt Objects.
+
+** URI
+
+------
+  * http://<rm http address:port>/ws/v1/cluster/apps/{appid}/appattempts
+------
+
+** HTTP Operations Supported 
+
+------
+  * GET
+------
+
+** Query Parameters Supported
+
+------
+  None
+------
+
+** Elements of the <appAttempts> object
+
+  When you make a request for the list of app attempts, the information will be returned as an array of app attempt objects. 
+
+  appAttempts:
+
+*---------------+--------------+-------------------------------+
+|| Item         || Data Type   || Description                  |
+*---------------+--------------+-------------------------------+
+| appAttempt | array of app attempt objects(JSON)/zero or more app attempt objects(XML) | The collection of app attempt objects |
+*---------------+--------------+--------------------------------+
+
+** Elements of the <appAttempt> object
+
+*---------------+--------------+-------------------------------+
+|| Item         || Data Type   || Description                  |
+*---------------+--------------+-------------------------------+
+| id | string | The app attempt id |
+*---------------+--------------+--------------------------------+
+| nodeId | string | The node id of the node the attempt ran on|
+*---------------+--------------+--------------------------------+
+| nodeHttpAddress | string | The node http address of the node the attempt ran on|
+*---------------+--------------+--------------------------------+
+| logsLink | string | The http link to the app attempt logs |
+*---------------+--------------+--------------------------------+
+| containerId | string | The id of the container for the app attempt |
+*---------------+--------------+--------------------------------+
+| startTime | long | The start time of the attempt (in ms since epoch)|
+*---------------+--------------+--------------------------------+
+
+** Response Examples
+
+  <<JSON response>>
+
+  HTTP Request:
+
+------
+  GET http://<rm http address:port>/ws/v1/cluster/apps/application_1326821518301_0005/appattempts
+------
+
+  Response Header:
+
++---+
+  HTTP/1.1 200 OK
+  Content-Type: application/json
+  Transfer-Encoding: chunked
+  Server: Jetty(6.1.26)
++---+
+
+  Response Body:
+
++---+
+{
+   "appAttempts" : {
+      "appAttempt" : [
+         {
+            "nodeId" : "host.domain.com:8041",
+            "nodeHttpAddress" : "host.domain.com:8042",
+            "startTime" : 1326381444693,
+            "id" : 1,
+            "logsLink" : "http://host.domain.com:8042/node/containerlogs/container_1326821518301_0005_01_000001/user1",
+            "containerId" : "container_1326821518301_0005_01_000001"
+         }
+      ]
+   }
+}
++---+
+
+  <<XML response>>
+
+  HTTP Request:
+
+------
+  GET http://<rm http address:port>/ws/v1/cluster/apps/application_1326821518301_0005/appattempts
+  Accept: application/xml
+------
+
+  Response Header:
+
++---+
+  HTTP/1.1 200 OK
+  Content-Type: application/xml
+  Content-Length: 575
+  Server: Jetty(6.1.26)
++---+
+
+  Response Body:
+
++---+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<appAttempts>
+  <appAttempt>
+    <nodeHttpAddress>host.domain.com:8042</nodeHttpAddress>
+    <nodeId>host.domain.com:8041</nodeId>
+    <id>1</id>
+    <startTime>1326381444693</startTime>
+    <containerId>container_1326821518301_0005_01_000001</containerId>
+    <logsLink>http://host.domain.com:8042/node/containerlogs/container_1326821518301_0005_01_000001/user1</logsLink>
+  </appAttempt>
+</appAttempts>
++---+
+
 * Cluster Nodes API
 
   With the Nodes API, you can obtain a collection of resources, each of which represents a node. When you run a GET operation on this resource, you obtain a collection of Node Objects. 
diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
index cca8427..d738f23 100644
--- a/hadoop-mapreduce-project/pom.xml
+++ b/hadoop-mapreduce-project/pom.xml
@@ -128,8 +128,8 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <!-- needed for security and runtime -->
       <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>com.google.inject</groupId>
@@ -159,6 +159,11 @@
       <groupId>commons-io</groupId>
       <artifactId>commons-io</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.hsqldb</groupId>
+      <artifactId>hsqldb</artifactId>
+      <version>2.0.0</version>
+    </dependency>
 
   </dependencies>
 
@@ -179,7 +184,7 @@
                 <!-- Using Unix script to preserve symlinks -->
                 <echo file="${project.build.directory}/dist-maketar.sh">
 
-                  which cygpath 2> /dev/null
+                  which cygpath 2&gt; /dev/null
                   if [ $? = 1 ]; then
                     BUILD_DIR="${project.build.directory}"
                   else
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java
deleted file mode 100644
index b70fb9a..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JTClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobSubmission;
-import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobVerification;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobStory;
-import org.apache.hadoop.tools.rumen.ZombieJob;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.mapreduce.JobID;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.List;
-import java.util.Set;
-import java.io.IOException;
-import org.junit.Assert;
-
-/**
- * Run and verify the Gridmix jobs for given a trace.
- */
-public class GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(GridmixSystemTestCase.class);
-  protected static Configuration  conf = new Configuration();
-  protected static MRCluster cluster;
-  protected static int cSize;
-  protected static JTClient jtClient;
-  protected static JTProtocol rtClient;
-  protected static Path gridmixDir;
-  protected static Map<String, String> map;
-  protected static GridmixJobSubmission gridmixJS;
-  protected static GridmixJobVerification gridmixJV;
-  protected static List<JobID> jobids;
-  
-  @BeforeClass
-  public static void before() throws Exception {
-    String [] excludeExpList = {"java.net.ConnectException", 
-                                "java.io.IOException"};
-    cluster = MRCluster.createCluster(conf);
-    cluster.setExcludeExpList(excludeExpList);
-    cluster.setUp();
-    cSize = cluster.getTTClients().size();
-    jtClient = cluster.getJTClient();
-    rtClient = jtClient.getProxy();
-    gridmixDir = new Path("herriot-gridmix");
-    UtilsForGridmix.createDirs(gridmixDir, rtClient.getDaemonConf());
-    map = UtilsForGridmix.getMRTraces(rtClient.getDaemonConf());
-  }
-
-  @AfterClass
-  public static void after() throws Exception {
-    UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
-    org.apache.hadoop.fs.FileUtil.fullyDelete(new java.io.File(System.
-        getProperty("java.io.tmpdir") + "/gridmix-st/"));
-    cluster.tearDown();
-
-    /* Clean up the proxy user directories if gridmix run with 
-      RoundRobinUserResovler mode.*/
-    if (gridmixJV != null 
-       && gridmixJV.getJobUserResolver().contains("RoundRobin")) {
-       List<String> proxyUsers = 
-           UtilsForGridmix.listProxyUsers(gridmixJS.getJobConf(),
-           UserGroupInformation.getLoginUser().getShortUserName());
-       for(int index = 0; index < proxyUsers.size(); index++){
-         UtilsForGridmix.cleanup(new Path("hdfs:///user/" + 
-            proxyUsers.get(index)), 
-            rtClient.getDaemonConf());
-       }
-    }
-  }
-  
-  /**
-   * Run the gridmix with specified runtime parameters and 
-   * verify the jobs the after completion of execution.
-   * @param runtimeValues - common runtime arguments for gridmix.
-   * @param otherValues - test specific runtime arguments for gridmix.
-   * @param tracePath - path of a trace file.
-   * @throws Exception - if an exception occurs.
-   */
-  public static void runGridmixAndVerify(String[] runtimeValues, 
-     String [] otherValues, String tracePath) throws Exception {
-     runGridmixAndVerify(runtimeValues, otherValues, tracePath , 
-         GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Run the gridmix with specified runtime parameters and
-   * verify the jobs the after completion of execution.
-   * @param runtimeValues - common runtime arguments for gridmix.
-   * @param otherValues - test specific runtime arguments for gridmix.
-   * @param tracePath - path of a trace file.
-   * @param mode - 1 for data generation, 2 for run the gridmix and 3 for
-   * data generation and run the gridmix.
-   * @throws Exception - if an exception occurs.
-   */
-  public static void runGridmixAndVerify(String [] runtimeValues, 
-      String [] otherValues, String tracePath, int mode) throws Exception {
-    List<JobID> jobids = runGridmix(runtimeValues, otherValues, mode);
-    gridmixJV = new GridmixJobVerification(new Path(tracePath), 
-                                           gridmixJS.getJobConf(), jtClient);
-    gridmixJV.verifyGridmixJobsWithJobStories(jobids);  
-  }
-
-  /**
-   * Run the gridmix with user specified mode.
-   * @param runtimeValues - common runtime parameters for gridmix.
-   * @param otherValues - test specifix runtime parameters for gridmix.
-   * @param mode -  1 for data generation, 2 for run the gridmix and 3 for
-   * data generation and run the gridmix.
-   * @return - list of gridmix job ids.
-   * @throws Exception - if an exception occurs.
-   */
-  public static List<JobID> runGridmix(String[] runtimeValues, 
-     String[] otherValues, int mode) throws Exception {
-    gridmixJS = new GridmixJobSubmission(rtClient.getDaemonConf(),
-       jtClient, gridmixDir);
-    gridmixJS.submitJobs(runtimeValues, otherValues, mode);
-    List<JobID> jobids = 
-        UtilsForGridmix.listGridmixJobIDs(jtClient.getClient(), 
-                                          gridmixJS.getGridmixJobCount());
-    return jobids;
-  }
-  
-  /**
-   * get the trace file based on given regular expression.
-   * @param regExp - trace file file pattern. 
-   * @return - trace file as string.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public static String getTraceFile(String regExp) throws IOException {
-    List<String> listTraces = UtilsForGridmix.listMRTraces(
-        rtClient.getDaemonConf());
-    Iterator<String> ite = listTraces.iterator();
-    while(ite.hasNext()) {
-      String traceFile = ite.next();
-      if (traceFile.indexOf(regExp)>=0) {
-        return traceFile;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Validate the task memory parameters.
-   * @param tracePath - trace file.
-   * @param isTraceHasHighRamJobs - true if trace has high ram job(s) 
-   *                                otherwise its false 
-   */
-  @SuppressWarnings("deprecation")
-  public static void validateTaskMemoryParamters(String tracePath,
-      boolean isTraceHasHighRamJobs) throws IOException {
-    if (isTraceHasHighRamJobs) {
-      GridmixJobStory gjs = new GridmixJobStory(new Path(tracePath),
-                                                rtClient.getDaemonConf());
-      Set<JobID> jobids = gjs.getZombieJobs().keySet();
-      boolean isHighRamFlag = false;
-      for (JobID jobid :jobids) {
-        ZombieJob zombieJob = gjs.getZombieJobs().get(jobid);
-        JobConf origJobConf = zombieJob.getJobConf();
-        int origMapFactor =
-            GridmixJobVerification.getMapFactor(origJobConf);
-        int origReduceFactor =
-            GridmixJobVerification.getReduceFactor(origJobConf);
-        if (origMapFactor >= 2 || origReduceFactor >= 2) {
-          isHighRamFlag = true;
-          long TaskMapMemInMB =
-              GridmixJobVerification.getScaledTaskMemInMB(
-                      GridMixConfig.JOB_MAP_MEMORY_MB,
-                      GridMixConfig.CLUSTER_MAP_MEMORY,
-                      origJobConf, rtClient.getDaemonConf());
-
-          long TaskReduceMemInMB =
-              GridmixJobVerification.getScaledTaskMemInMB(
-                      GridMixConfig.JOB_REDUCE_MEMORY_MB,
-                      GridMixConfig.CLUSTER_REDUCE_MEMORY,
-                      origJobConf, rtClient.getDaemonConf());
-          long taskMapLimitInMB =
-              conf.getLong(GridMixConfig.CLUSTER_MAX_MAP_MEMORY,
-                           JobConf.DISABLED_MEMORY_LIMIT);
-
-          long taskReduceLimitInMB =
-              conf.getLong(GridMixConfig.CLUSTER_MAX_REDUCE_MEMORY,
-                           JobConf.DISABLED_MEMORY_LIMIT);
-
-          GridmixJobVerification.verifyMemoryLimits(TaskMapMemInMB,
-                                                    taskMapLimitInMB);
-          GridmixJobVerification.verifyMemoryLimits(TaskReduceMemInMB,
-                                                    taskReduceLimitInMB);
-        }
-      }
-      Assert.assertTrue("Trace doesn't have atleast one high ram job.",
-                        isHighRamFlag);
-    }
-  }
-
-  public static boolean isLocalDistCache(String fileName, String userName, 
-                                         boolean visibility) {
-    return DistributedCacheEmulator.isLocalDistCacheFile(fileName, 
-                                                         userName, visibility);
-  }
-}
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithCustomInterval.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithCustomInterval.java
deleted file mode 100644
index dd8e51e..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithCustomInterval.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test cpu emulation with default interval for gridmix jobs 
- * against different input data, submission policies and user resolvers.
- * Verify the cpu resource metrics of both maps and reduces phase of
- * Gridmix jobs with their corresponding original job in the input trace.
- */
-public class TestCPUEmulationForMapsAndReducesWithCustomInterval 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-          LogFactory.getLog("TestCPUEmulationWithUncompressedInput.class");
-  int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
-
- /**
-   * Generate compressed input and run {@link Gridmix} by turning on the 
-   * cpu emulation feature with default setting. The {@link Gridmix} 
-   * should use the following runtime parameters.
-   * Submission Policy : STRESS, UserResovler: RoundRobinUserResolver. 
-   * Once the {@link Gridmix} run is complete, verify cpu resource metrics of 
-   * {@link Gridmix} jobs with their corresponding original job in a trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void  testCPUEmulationForMapsAndReducesWithCompressedInputCase7() 
-      throws Exception {
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("cpu_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              RoundRobinUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.35F",
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); 
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on the 
-   * cpu emulation feature with default setting. The {@link Gridmix} 
-   * should use the following runtime parameters.
-   * Submission Policy : SERIAL, UserResovler: SubmitterUserResolver 
-   * Once the {@link Gridmix} run is complete, verify cpu resource metrics of 
-   * {@link Gridmix} jobs with their corresponding original job in a trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void  testCPUEmulatonForMapsAndReducesWithUncompressedInputCase8() 
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("cpu_emul_case2");
-    Assert.assertNotNull("Trace file not found.", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                SubmitterUserResolver.class.getName(), 
-                "SERIAL", 
-                inputSizeInMB + "m",
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.4F",
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN     };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); 
-  }
-}
-
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithDefaultInterval.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithDefaultInterval.java
deleted file mode 100644
index edd14a6..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithDefaultInterval.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test cpu emulation with default interval for gridmix jobs 
- * against different input data, submission policies and user resolvers.
- * Verify the cpu resource metrics for both maps and reduces of
- * Gridmix jobs with their corresponding original job in the input trace.
- */
-public class TestCPUEmulationForMapsAndReducesWithDefaultInterval 
-                                            extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-          LogFactory.getLog(
-              "TestCPUEmulationForMapsAndReducesWithDefaultInterval.class");
-  int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
-
- /**
-   * Generate compressed input and run {@link Gridmix} by turning on the 
-   * cpu emulation feature with default setting. The {@link Gridmix} 
-   * should use the following runtime parameters.
-   * Submission Policy : REPLAY, UserResovler: RoundRobinUserResolver. 
-   * Once the {@link Gridmix} run is complete, verify cpu resource metrics of 
-   * {@link Gridmix} jobs with their corresponding original jobs in the trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void  testCPUEmulationForMapsAndReducesWithCompressedInputCase5() 
-      throws Exception {
-    final long inputSizeInMB = 7168;
-    String tracePath = getTraceFile("cpu_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              RoundRobinUserResolver.class.getName(),
-              "REPLAY",
-              inputSizeInMB + "m",
-              "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", JobContext.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); 
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on the 
-   * cpu emulation feature with default settings. The {@link Gridmix} 
-   * should use the following runtime parameters.
-   * Submission Policy : STRESS, UserResovler: SubmitterUserResolver 
-   * Once the Gridmix run is complete, verify cpu resource metrics of 
-   * {@link Gridmix} jobs with their corresponding original jobs in the trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void  testCPUEmulatonForMapsAndReducesWithUncompressedInputCase6() 
-      throws Exception {
-    final long inputSizeInMB = cSize * 400;
-    String tracePath = getTraceFile("cpu_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                SubmitterUserResolver.class.getName(), 
-                "STRESS",
-                inputSizeInMB + "m",
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", JobContext.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN     };
-    
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); 
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithCustomInterval.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithCustomInterval.java
deleted file mode 100644
index ac52058..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithCustomInterval.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} cpu emulation with custom interval for 
- * gridmix jobs against different input data, submission policies and 
- * user resolvers. Verify the map phase cpu metrics of gridmix jobs 
- * against their original job in the trace. 
- */
-public class TestCPUEmulationForMapsWithCustomInterval 
-                                            extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog("TestCPUEmulationForMapsWithCustomInterval.class");
-  int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
-
-  /**
-   * Generate compressed input and run {@link Gridmix} by turning on 
-   * cpu emulation feature with custom setting. The {@link Gridmix} should 
-   * use the following runtime parameters while running gridmix jobs.
-   * Submission Policy : STRESS, User Resolver Mode : SumitterUserResolver
-   * Once {@link Gridmix} run is complete, verify maps phase cpu resource 
-   * metrics of {@link Gridmix} jobs with their corresponding original
-   * in the trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void  testCPUEmulatonForMapsWithCompressedInputCase3() 
-      throws Exception { 
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("cpu_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "STRESS",
-                               inputSizeInMB + "m",
-                               tracePath};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
-              GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.25F"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
-  }
-
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on 
-   * cpu emulation feature with custom settings. The {@link Gridmix} 
-   * should use the following runtime paramters while running gridmix jobs.
-   * Submission Policy: REPLAY  User Resolver Mode: RoundRobinUserResolver
-   * Once {@link Gridmix} run is complete, verify the map phase cpu resource 
-   * metrics of {@link Gridmix} jobs with their corresponding jobs
-   * in the original trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testCPUEmulatonForMapsUnCompressedInputCase4() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 200;
-    String tracePath = getTraceFile("cpu_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-           {"LOADJOB",
-            RoundRobinUserResolver.class.getName(),
-            "REPLAY",
-            inputSizeInMB + "m",
-            "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-            tracePath};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-              GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
-        "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.35F"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
-  }
-}
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithDefaultInterval.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithDefaultInterval.java
deleted file mode 100644
index 6eabc53..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithDefaultInterval.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} cpu emulation with default settings for 
- * gridmix jobs against different input data, submission policies and 
- * user resolvers. Verify the map phase cpu metrics of gridmix jobs 
- * against their original jobs in the trace. 
- */
-public class TestCPUEmulationForMapsWithDefaultInterval 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestCPUEmulationForMapsWithDefaultInterval.class");
-  int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
-
-  /**
-   * Generate compressed input and run {@link Gridmix} by turning on cpu 
-   * emulation feature with default settings. The {@link Gridmix} should 
-   * use the following runtime parameters while running the gridmix jobs.
-   * Submission Policy: STRESS, UserResolver: SubmitterUserResolver. 
-   * Once the {@link Gridmix} run is complete, verify map phase cpu metrics of 
-   * {@link Gridmix} jobs with their corresponding original job in a trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testCPUEmulatonForMapsWithCompressedInputCase1() 
-      throws Exception {
-    final long inputSizeInMB = 1024 * 6;
-    String tracePath = getTraceFile("cpu_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = { "LOADJOB", 
-                                SubmitterUserResolver.class.getName(), 
-                                "STRESS", 
-                                inputSizeInMB + "m", 
-                                tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); 
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on 
-   * cpu emulation feature with default settings. The {@link Gridmix} 
-   * should use the following runtime parameters while running Gridmix jobs.
-   * Submission Policy: REPLAY, UserResolver: RoundRobinUserResolver
-   * Once the Gridmix run is complete, verify cpu resource metrics of 
-   * {@link Gridmix} jobs with their corresponding original job in a trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testCPUEmulatonForMapsWithUnCompressedInputCase2() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 200;
-    String tracePath = getTraceFile("cpu_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              RoundRobinUserResolver.class.getName(),
-              "REPLAY",
-              inputSizeInMB + "m",
-              "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java
deleted file mode 100644
index 3ade9e3..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the compression emulation for all the jobs in the trace 
- * irrespective of compressed inputs.
- */
-public class TestCompressionEmulationEnableForAllTypesOfJobs 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(
-          "TestCompressionEmulationEnableForAllTypesOfJobs.class");
-
-  /**
-   *  Generate compressed input data and verify the compression emulation
-   *  for all the jobs in the trace irrespective of whether the original
-   *  job uses the compressed input or not.Also use the custom compression
-   *  ratios for map input, map output and reduce output.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testInputCompressionEmualtionEnableForAllJobsWithDefaultRatios() 
-      throws Exception { 
-    final long inputSizeInMB = 1024 * 6;
-    final String tracePath = getTraceFile("compression_case4_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "REPLAY",
-                                     inputSizeInMB + "m",
-                                     tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true",
-        "-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.46",
-        "-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.35",
-        "-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.36"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   *  Use existing compressed input data and turn off the compression 
-   *  emulation. Verify the compression emulation whether it uses 
-   *  by the jobs or not.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testInputCompressionEmulationEnableForAllJobsWithCustomRatios() 
-      throws Exception { 
-     final String tracePath = getTraceFile("compression_case4_trace");
-     Assert.assertNotNull("Trace file has not found.", tracePath);
-     final String [] runtimeValues = {"LOADJOB",
-                                      SubmitterUserResolver.class.getName(),
-                                      "SERIAL",
-                                      tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }  
-}
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java
deleted file mode 100644
index 4b7fc3a..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Assert;
-import org.junit.Test;
-/**
- * Verify the gridmix jobs compression ratio's of input, 
- * intermediate input and with default/custom ratios.Also verify
- * the compressed output file format is enabled or not.
- *
- */
-public class TestCompressionEmulationForCompressInAndUncompressOut 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(
-          "TestCompressionEmulationForCompressInAndUncompressOut.class");
-  final long inputSizeInMB = 1024 * 6;
-
-  /**
-   * Generate a compressed input data and verify the compression ratios 
-   * of map input and map output against default compression ratios 
-   * and also verify the whether the compressed output file output format 
-   * is enabled or not.
-   * @throws Exception -if an error occurs.
-   */
-  @Test
-  public void testCompressionEmulationOfCompressedInputWithDefaultRatios() 
-      throws Exception {
-    final String tracePath = getTraceFile("compression_case2_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     inputSizeInMB + "m",
-                                     tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Use existing compressed input data and verify the compression ratios 
-   * of input and intermediate input against custom compression ratios 
-   * and also verify the compressed output file output format is enabled or not.
-   * @throws Exception -if an error occurs.
-   */
-  @Test
-  public void testCompressionEmulationOfCompressedInputWithCustomRatios() 
-      throws Exception {
-    final String tracePath = getTraceFile("compression_case2_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     inputSizeInMB + "m",
-                                     tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
-        "-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.58",
-        "-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.42"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java
deleted file mode 100644
index 383fc83..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Assert;
-import org.junit.Test;
-/**
- * Verify the gridmix jobs compression ratio's of reduce output and 
- * with default and custom ratios.
- */
-public class TestCompressionEmulationForUncompressInAndCompressOut
-   extends GridmixSystemTestCase { 
-   private static final Log LOG = 
-       LogFactory.getLog(
-           "TestCompressionEmulationForUncompressInAndCompressOut.class");
-   final long inputSizeInMB = 1024 * 6;
-
-  /**
-   * Generate a uncompressed input data and verify the compression ratios 
-   * of reduce output against default output compression ratio.
-   * @throws Exception -if an error occurs.
-   */
-  @Test
-  public void testCompressionEmulationOfCompressedOuputWithDefaultRatios() 
-      throws Exception { 
-    final String tracePath = getTraceFile("compression_case3_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "REPLAY",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Use existing uncompressed input data and verify the compression ratio
-   * of the reduce output against a custom output compression ratio, and also
-   * verify the output file format of the compressed output.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testCompressionEmulationOfCompressedOutputWithCustomRatios() 
-      throws Exception {
-    final String tracePath = getTraceFile("compression_case3_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
-    final String [] runtimeValues = { "LOADJOB",
-                                      SubmitterUserResolver.class.getName(),
-                                      "STRESS",
-                                      inputSizeInMB + "m",
-                                      tracePath };
-
-    final String [] otherArgs = { 
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.38"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
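The compression-emulation tests in this diff are configured entirely through "-D" property pairs; the knobs that appear here are GRIDMIX_COMPRESSION_ENABLE, GRIDMIX_INPUT_DECOMPRESS_ENABLE, GRIDMIX_INPUT_COMPRESS_RATIO, GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO and GRIDMIX_OUTPUT_COMPRESSION_RATIO. A minimal custom-ratio sketch, assuming the GridMixConfig/GridmixJob constants and the runGridmixAndVerify(...) helper from the deleted GridmixSystemTestCase harness, with purely illustrative ratio values:

    final String[] otherArgs = {
        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",             // turn compression emulation on
        "-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true",        // treat map input as compressed
        "-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.58",           // map input ratio (illustrative)
        "-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.42", // map output ratio (illustrative)
        "-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.40",       // reduce output ratio (illustrative)
        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false"
    };
    runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());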
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java
deleted file mode 100644
index bb77016..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.GridmixJob;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run {@link Gridmix} with a high ram jobs trace while high ram emulation
- * is disabled, and verify whether each {@link Gridmix} job honors the high
- * ram settings. With emulation disabled it should not honor the high ram
- * settings and should run as a normal job.
- */
-public class TestDisableGridmixEmulationOfHighRam 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestDisableGridmixEmulationOfHighRam.class");
-
-  /**
-   * Generate input data and run {@link Gridmix} with a high ram jobs trace
-   * as a load job with the SERIAL submission policy in SubmitterUserResolver
-   * mode. After execution completes, verify whether each {@link Gridmix} job
-   * honors the high ram settings. With emulation disabled the jobs should
-   * not honor the high ram settings.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHighRamForReducersOfMRJobs() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 250;
-    String tracePath = getTraceFile("highram_mr_jobs_case3");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "SERIAL",
-                               inputSizeInMB + "m",
-                               tracePath};
-
-    String [] otherArgs = {
-               "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-               "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", 
-               "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
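The high-ram tests in this diff hinge on a single toggle: with GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE set to false (as above), jobs from a high-ram trace are expected to run with normal task memory settings, while TestEmulationOfHighRamAndNormalMRJobs further below enables emulation through GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE and validates the trace's memory parameters first. A minimal sketch of the two configurations, reusing only constants that appear in this diff:

    // High-ram emulation off: high-ram requests in the trace are ignored.
    String[] highRamDisabled = {
        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false"
    };
    // High-ram emulation on: Gridmix is expected to honor the trace's settings.
    String[] highRamEnabled = {
        "-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=true"
    };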
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java
deleted file mode 100644
index a1ae1e9..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the emulation of HDFS and Local FS distributed cache files against
- * the given input trace file.
- */
-public class TestEmulationOfHDFSAndLocalFSDCFiles extends 
-    GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog("TestEmulationOfLocalFSDCFiles.class");
-
-  /**
-   * Generate the input data and distributed cache files for HDFS and 
-   * local FS. Verify the gridmix emulation of HDFS and Local FS 
-   * distributed cache files in RoundRobinUserResolver mode with STRESS
-   * submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateDataEmulateHDFSAndLocalFSDCFiles() 
-     throws Exception  {
-    final long inputSizeInMB = 1024 * 6;
-    final String tracePath = getTraceFile("distcache_case8_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-       "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-       "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
-       "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-       "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Use existing input and distributed cache files for HDFS and
-   * local FS. Verify the gridmix emulation of HDFS and Local FS
-   * distributed cache files in SubmitterUserResolver mode with STRESS
-   * submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHDFSAndLocalFSDCFiles() 
-     throws Exception  {
-    final String tracePath = getTraceFile("distcache_case8_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues ={"LOADJOB",
-                                    SubmitterUserResolver.class.getName(),
-                                    "STRESS",
-                                    tracePath};
-
-    final String [] otherArgs = { 
-       "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-       "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
-       "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-       "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
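The distributed cache tests in this diff enable cache emulation with GRIDMIX_DISTCACHE_ENABLE and, whenever emulated jobs share cache files, also set MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN to false so delegation tokens are not cancelled when an individual job completes. A minimal sketch of that configuration, assuming the same constants and the runGridmixAndVerify(...) helper shown in the deleted tests:

    final String[] otherArgs = {
        "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", // do not cancel tokens after each job
        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",   // emulate distributed cache file access
        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false"
    };
    runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());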
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java
deleted file mode 100644
index 7f8938f..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the Gridmix emulation of an HDFS distributed cache file that is
- * used by multiple jobs submitted by different users.
- */
-public class TestEmulationOfHDFSDCFileUsesMultipleJobs extends 
-    GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog("TestEmulationOfHDFSDCFileUsesMultipleJobs.class");
-
-  /**
-   * Generate the input data and an HDFS distributed cache file based
-   * on the given input trace. Verify the Gridmix emulation of the HDFS
-   * distributed cache file in RoundRobinUserResolver mode with the
-   * STRESS submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulationOfHDFSDCFile() 
-     throws Exception { 
-    final long inputSizeInMB = 1024 * 6;
-    final String tracePath = getTraceFile("distcache_case9_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-        "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Verify the Gridmix emulation of HDFS distributed cache
-   * file in SubmitterUserResolver mode with STRESS submission policy 
-   * by using the existing input data and HDFS distributed cache file. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixEmulationOfHDFSPublicDCFile() 
-      throws Exception {
-    final String tracePath = getTraceFile("distcache_case9_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     tracePath};
-
-    final String [] otherArgs = {
-      "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-      "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java
deleted file mode 100644
index 453e5b9..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- *  Verify the Gridmix emulation of HDFS distributed cache files of 
- *  different visibilities. 
- */
-
-public class TestEmulationOfHDFSDCFilesWithDifferentVisibilities 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-     LogFactory.getLog(
-         "TestEmulationOfHDFSDCFilesWithDifferentVisibilities.class");
-  
-  /**
-   * Generate input data and HDFS distributed cache files of different
-   * visibilities based on given input trace. Verify the Gridmix emulation 
-   * of HDFS distributed cache files of different visibilities in 
-   * RoundRobinUserResolver mode with STRESS submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulateOfHDFSDCFilesWithDiffVisibilities() 
-     throws Exception {
-    final long INPUT_SIZE = 1024 * 9;
-    final String tracePath = getTraceFile("distcache_case5_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = 
-                     { "LOADJOB",
-                       RoundRobinUserResolver.class.getName(),
-                       "STRESS",
-                       INPUT_SIZE+"m",
-                       "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                       tracePath};
-
-    final String [] otherArgs = { 
-        "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE +  "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Disable distributed cache emulation and verify whether the Gridmix
-   * jobs emulate distributed cache files or not.
-   * @throws Exception
-   */
-  @Test
-  public void testHDFSDCFilesWithoutEnableDCEmulation() 
-     throws Exception {
-    final String tracePath = getTraceFile("distcache_case6_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues ={ "LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "REPLAY",
-                                     tracePath};
-    final String [] otherArgs = {
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java
deleted file mode 100644
index cb3a35f..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run {@link Gridmix} with a trace containing a combination of high ram and
- * normal jobs, and verify whether the high ram {@link Gridmix} jobs honor the
- * high ram settings. Normal MR jobs should not honor the high ram emulation.
- */
-public class TestEmulationOfHighRamAndNormalMRJobs
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestEmulationOfHighRamAndNormalMRJobs.class");
-
-  /**
-   * Generate input data and run the combination of normal and high ram
-   * {@link Gridmix} jobs as load jobs with the SERIAL submission policy
-   * in SubmitterUserResolver mode. After execution completes, verify
-   * whether each {@link Gridmix} job honors the high ram settings.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHighRamForReducersOfMRJobs() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 250;
-    String tracePath = getTraceFile("highram_mr_jobs_case4");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeArgs = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "SERIAL",
-                               inputSizeInMB + "m",
-                               tracePath};
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", 
-            "-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=true"};
-
-    validateTaskMemoryParamters(tracePath, true);
-    runGridmixAndVerify(runtimeArgs, otherArgs, tracePath);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java
deleted file mode 100644
index eff47f2..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the emulation of local FS distributed cache files.
- *
- */
-public class TestEmulationOfLocalFSDCFiles extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestEmulationOfLocalFSDCFiles.class");
-
-  /**
-   * Generate the input data and distributed cache files. Verify the
-   * gridmix emulation of local file system distributed cache files 
-   * in RoundRobinUserResolver mode with REPLAY submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateInputAndEmulateLocalFSDCFile() 
-     throws Exception { 
-    final long inputSizeInMB = 1024 * 6;
-    final String tracePath = getTraceFile("distcache_case7_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "REPLAY",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-       "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-       "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
-       "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-       "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Use existing input and local distributed cache files and verify
-   * the gridmix emulation of local file system distributed cache
-   * files in SubmitterUserResolver mode with the STRESS
-   * submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfLocalFSDCFile() 
-     throws Exception  {
-    final String tracePath = getTraceFile("distcache_case7_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     tracePath};
-
-    final String [] otherArgs = {
-       "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-      "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
-      "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-      "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java
deleted file mode 100644
index ef273b5..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JTClient;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.gridmix.RoundRobinUserResolver;
-import org.apache.hadoop.mapred.gridmix.EchoUserResolver;
-import org.apache.hadoop.mapred.gridmix.SubmitterUserResolver;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.ContentSummary;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-import org.junit.Assert;
-import java.io.IOException;
-
-/**
- * Verify the Gridmix data generation with various submission policies and 
- * user resolver modes.
- */
-public class TestGridMixDataGeneration {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridMixDataGeneration.class);
-  private static Configuration conf = new Configuration();
-  private static MRCluster cluster;
-  private static JTClient jtClient;
-  private static JTProtocol rtClient;
-  private static Path gridmixDir;
-  private static int cSize;
-
-  @BeforeClass
-  public static void before() throws Exception {
-    String [] excludeExpList = {"java.net.ConnectException", 
-                                "java.io.IOException"};
-    cluster = MRCluster.createCluster(conf);
-    cluster.setExcludeExpList(excludeExpList);
-    cluster.setUp();
-    cSize = cluster.getTTClients().size();
-    jtClient = cluster.getJTClient();
-    rtClient = jtClient.getProxy();
-    gridmixDir = new Path("herriot-gridmix");
-    UtilsForGridmix.createDirs(gridmixDir, rtClient.getDaemonConf());
-  }
-
-  @AfterClass
-  public static void after() throws Exception {
-    UtilsForGridmix.cleanup(gridmixDir,conf);
-    cluster.tearDown();
-  }
-  
-  /**
-   * Generate the data with the STRESS submission policy in SubmitterUserResolver
-   * mode and verify whether the generated data matches the given
-   * input size.
-   * @throws IOException
-   */
-  @Test
-  public void testGenerateDataWithSTRESSSubmission() throws Exception {
-    conf = rtClient.getDaemonConf();
-    final long inputSizeInMB = cSize * 128;
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "STRESS",
-                               inputSizeInMB + "m",
-                               "file:///dev/null"};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    int exitCode = 
-        UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-            GridMixRunMode.DATA_GENERATION.getValue(), 
-            runtimeValues, otherArgs);
-    Assert.assertEquals("Data generation has failed.", 0 , exitCode);
-    checkGeneratedDataAndJobStatus(inputSizeInMB);
-  }
-  
-  /**
-   * Generate the data with the REPLAY submission policy in RoundRobinUserResolver
-   * mode and verify whether the generated data matches the given
-   * input size.
-   * @throws Exception
-   */
-  @Test
-  public void testGenerateDataWithREPLAYSubmission() throws Exception {
-    conf = rtClient.getDaemonConf();
-    final long inputSizeInMB = cSize * 300;
-    String [] runtimeValues = 
-               {"LOADJOB",
-                RoundRobinUserResolver.class.getName(),
-                "REPLAY",
-                inputSizeInMB +"m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                "file:///dev/null"};
-    
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    int exitCode = 
-        UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-            GridMixRunMode.DATA_GENERATION.getValue(), 
-            runtimeValues, otherArgs);
-    Assert.assertEquals("Data generation has failed.", 0 , exitCode);
-    checkGeneratedDataAndJobStatus(inputSizeInMB); 
-  }
-  
-  /**
-   * Generate the data with the SERIAL submission policy in EchoUserResolver
-   * mode and also set the number of bytes per file. Verify whether each
-   * file size matches the given per-file size, and also
-   * verify the overall size of the generated data.
-   * @throws Exception
-   */
-  @Test
-  public void testGenerateDataWithSERIALSubmission() throws Exception {
-    conf = rtClient.getDaemonConf();
-    long perNodeSizeInMB = 500; // 500 mb per node data
-    final long inputSizeInMB = cSize * perNodeSizeInMB;
-    String [] runtimeValues ={"LOADJOB", 
-                              EchoUserResolver.class.getName(), 
-                              "SERIAL", 
-                              inputSizeInMB + "m", 
-                              "file:///dev/null"};
-    long bytesPerFile = 200  * 1024 * 1024; // 200 mb per file of data
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile, 
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    int exitCode = 
-        UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-            GridMixRunMode.DATA_GENERATION.getValue(), 
-            runtimeValues, otherArgs);
-    Assert.assertEquals("Data generation has failed.", 0 , exitCode);
-    LOG.info("Verify the eache file size in a generate data.");
-    verifyEachNodeSize(new Path(gridmixDir, "input"), perNodeSizeInMB);
-    verifyNumOfFilesGeneratedInEachNode(new Path(gridmixDir, "input"), 
-                                        perNodeSizeInMB, bytesPerFile);
-    checkGeneratedDataAndJobStatus(inputSizeInMB);
-  }
-  
-  private void checkGeneratedDataAndJobStatus(long inputSize) 
-      throws IOException {
-    LOG.info("Verify the generated data size.");
-    long dataSizeInMB = getDataSizeInMB(new Path(gridmixDir,"input"));
-    Assert.assertTrue("Generate data has not matched with given size",
-       dataSizeInMB + 0.1 > inputSize || dataSizeInMB - 0.1 < inputSize);
- 
-    JobClient jobClient = jtClient.getClient();
-    int len = jobClient.getAllJobs().length;
-    LOG.info("Verify the job status after completion of job.");
-    Assert.assertEquals("Job has not succeeded.", JobStatus.SUCCEEDED, 
-                        jobClient.getAllJobs()[len-1].getRunState());
-  }
-  
-  private void verifyEachNodeSize(Path inputDir, long dataSizePerNode) 
-      throws IOException {
-    FileSystem fs = inputDir.getFileSystem(conf);
-    FileStatus [] fstatus = fs.listStatus(inputDir);
-    for (FileStatus fstat : fstatus) {
-      if ( fstat.isDirectory()) {
-        long fileSize = getDataSizeInMB(fstat.getPath());
-        Assert.assertTrue("The Size has not matched with given "
-                         + "per node file size(" + dataSizePerNode +"MB)", 
-                         fileSize + 0.1 > dataSizePerNode 
-                         || fileSize - 0.1 < dataSizePerNode);
-      }
-    }    
-  }
-
-  private void verifyNumOfFilesGeneratedInEachNode(Path inputDir, 
-      long nodeSize, long fileSize) throws IOException {
-    long fileCount = nodeSize/fileSize;
-    long expFileCount = Math.round(fileCount);
-    expFileCount = expFileCount + ((nodeSize%fileSize != 0)? 1:0);
-    FileSystem fs = inputDir.getFileSystem(conf);
-    FileStatus [] fstatus = fs.listStatus(inputDir);
-    for (FileStatus fstat : fstatus) {
-      if ( fstat.isDirectory()) {
-        FileSystem nodeFs = fstat.getPath().getFileSystem(conf);
-        long actFileCount = nodeFs.getContentSummary(
-            fstat.getPath()).getFileCount();
-        Assert.assertEquals("File count has not matched.", expFileCount, 
-                            actFileCount);
-      }
-    }
-  }
-
-  private static long getDataSizeInMB(Path inputDir) throws IOException {
-    FileSystem fs = inputDir.getFileSystem(conf);
-    ContentSummary csmry = fs.getContentSummary(inputDir);
-    long dataSize = csmry.getLength();
-    dataSize = dataSize/(1024 * 1024);
-    return dataSize;
-  }
-}
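The SERIAL data-generation test above caps each generated file at a fixed size and then expects roughly ceil(perNodeSize / perFileSize) files under every node directory. A short worked sketch using the test's own numbers, with both quantities expressed in MB so the division is meaningful:

    long perNodeSizeInMB = 500;   // data generated per node in the test above
    long perFileSizeInMB = 200;   // GRIDMIX_BYTES_PER_FILE, expressed here in MB
    long expectedFilesPerNode = perNodeSizeInMB / perFileSizeInMB
        + ((perNodeSizeInMB % perFileSizeInMB != 0) ? 1 : 0);   // ceil(500/200) = 3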
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java
deleted file mode 100644
index 883feec8..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.mapreduce.test.system.JTClient;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapred.gridmix.FilePool;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-import java.io.IOException;
-import java.util.ArrayList;
-
-public class TestGridMixFilePool {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridMixFilePool.class);
-  private static Configuration conf = new Configuration();
-  private static MRCluster cluster;
-  private static JTProtocol remoteClient;
-  private static JTClient jtClient;
-  private static Path gridmixDir;
-  private static int clusterSize; 
-  
-  @BeforeClass
-  public static void before() throws Exception {
-    String []  excludeExpList = {"java.net.ConnectException", 
-                                 "java.io.IOException"};
-    cluster = MRCluster.createCluster(conf);
-    cluster.setExcludeExpList(excludeExpList);
-    cluster.setUp();
-    jtClient = cluster.getJTClient();
-    remoteClient = jtClient.getProxy();
-    clusterSize = cluster.getTTClients().size();
-    gridmixDir = new Path("herriot-gridmix");
-    UtilsForGridmix.createDirs(gridmixDir, remoteClient.getDaemonConf());
-  }
-
-  @AfterClass
-  public static void after() throws Exception {
-    UtilsForGridmix.cleanup(gridmixDir, conf);
-    cluster.tearDown();
-  }
-  
-  @Test
-  public void testFilesCountAndSizesForSpecifiedFilePool() throws Exception {
-    conf = remoteClient.getDaemonConf();
-    final long inputSizeInMB = clusterSize * 200;
-    int [] fileSizesInMB = {50, 100, 400, 50, 300, 10, 60, 40, 20 ,10 , 500};
-    long targetSize = Long.MAX_VALUE;
-    final int expFileCount = clusterSize + 4;
-    String [] runtimeValues ={"LOADJOB",
-                              SubmitterUserResolver.class.getName(),
-                              "STRESS",
-                              inputSizeInMB + "m",
-                              "file:///dev/null"}; 
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    // Generate the input data by using gridmix framework.
-    int exitCode = 
-        UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-            GridMixRunMode.DATA_GENERATION.getValue(), 
-            runtimeValues, otherArgs);
-    Assert.assertEquals("Data generation has failed.", 0 , exitCode);
-    // Create files of the above-mentioned sizes without using gridmix
-    // input generation.
-    createFiles(new Path(gridmixDir, "input"), fileSizesInMB);
-    conf.setLong(FilePool.GRIDMIX_MIN_FILE, 100 * 1024 * 1024);
-    FilePool fpool = new FilePool(conf, new Path(gridmixDir, "input"));
-    fpool.refresh();
-    verifyFilesSizeAndCountForSpecifiedPool(expFileCount, targetSize, fpool);
-  }
-  
-  private void createFiles(Path inputDir, int [] fileSizes) 
-      throws Exception { 
-    for (int size : fileSizes) {
-      UtilsForGridmix.createFile(size, inputDir, conf);
-    }
-  }
-  
-  private void verifyFilesSizeAndCountForSpecifiedPool(int expFileCount, 
-      long minFileSize, FilePool pool) throws IOException {
-    final ArrayList<FileStatus> files = new ArrayList<FileStatus>();
-    long filesSizeInBytes = pool.getInputFiles(minFileSize, files);
-    long actFilesSizeInMB = filesSizeInBytes / (1024 * 1024);
-    long expFilesSizeInMB = (clusterSize * 200) + 1300;
-    Assert.assertEquals("Files Size has not matched for specified pool.", 
-                        expFilesSizeInMB, actFilesSizeInMB);
-    int actFileCount = files.size();
-    Assert.assertEquals("File count has not matched.", expFileCount, 
-                        actFileCount);
-    int count = 0;
-    for (FileStatus fstat : files) {
-      String fp = fstat.getPath().toString();
-      count = count + ((fp.indexOf("datafile_") > 0)? 0 : 1);
-    }
-    Assert.assertEquals("Total folders are not matched with cluster size", 
-                        clusterSize, count);
-  }
-}
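One way to read the expected values in TestGridMixFilePool above: with FilePool.GRIDMIX_MIN_FILE set to 100 MB, the pool should return the clusterSize generated per-node files (about 200 MB each) plus only those manually created files that reach the 100 MB threshold, hence clusterSize + 4 files and clusterSize * 200 + 1300 MB in total. A small sketch of that filtering arithmetic:

    int[] fileSizesInMB = {50, 100, 400, 50, 300, 10, 60, 40, 20, 10, 500};
    int keptCount = 0;
    long keptSizeInMB = 0;
    for (int size : fileSizesInMB) {
      if (size >= 100) {          // files below GRIDMIX_MIN_FILE are filtered out
        keptCount++;
        keptSizeInMB += size;     // 100 + 400 + 300 + 500 = 1300
      }
    }
    // keptCount == 4 and keptSizeInMB == 1300, matching the assertions above.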
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java
deleted file mode 100644
index 1dfc897..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.gridmix.Gridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the Gridmix generated input when compression emulation is turned on.
- */
-public class TestGridmixCompressedInputGeneration 
-    extends GridmixSystemTestCase { 
-
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixCompressedInputGeneration.class");
-
-  /**
-   * Generate input data and verify whether input files are compressed
-   * or not.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixCompressionInputGeneration() throws Exception {
-    final long inputSizeInMB = 1024 * 7;
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     inputSizeInMB  + "m",
-                                     "file:///dev/null"};
-    final String [] otherArgs = { 
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
-    };
-    LOG.info("Verify the generated compressed input data.");
-    runAndVerify(true, inputSizeInMB, runtimeValues, otherArgs);
-  }
-
-  /**
-   * Disable compression emulation and verify whether input files are 
-   * compressed or not.
-   * @throws Exception
-   */
-  @Test
-  public void testGridmixInputGenerationWithoutCompressionEnable() 
-      throws Exception { 
-    UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
-    final long inputSizeInMB = 1024 * 6;
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     inputSizeInMB + "m",
-                                     "file:///dev/null"};
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    LOG.info("Verify the generated uncompressed input data.");
-    runAndVerify(false, inputSizeInMB, runtimeValues, otherArgs);
-  }
-  
-  private void runAndVerify(boolean isCompressed, long INPUT_SIZE, 
-      String [] runtimeValues, String [] otherArgs) throws Exception { 
-    int exitCode = 
-        UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-                                      GridMixRunMode.DATA_GENERATION.getValue(),
-                                      runtimeValues,otherArgs);
-    Assert.assertEquals("Data generation has failed.", 0, exitCode);
-    verifyJobStatus();
-    verifyInputDataSize(INPUT_SIZE);
-    verifyInputFiles(isCompressed);
-  }
-  
-  private void verifyInputFiles(boolean isCompressed) throws IOException { 
-    List<String> inputFiles = 
-        getInputFiles(conf, Gridmix.getGridmixInputDataPath(gridmixDir));
-    for (String inputFile: inputFiles) {
-      boolean fileStatus = (inputFile.contains(".gz") 
-                         || inputFile.contains(".tgz"))? true : false;
-      if (isCompressed) { 
-        Assert.assertTrue("Compressed input split file was not found.",
-                          fileStatus);
-      } else {
-        Assert.assertFalse("Uncompressed input split file was not found.",
-                           fileStatus);
-      }
-    }
-  }
-
-  private void verifyInputDataSize(long INPUT_SIZE) throws IOException {
-    long actDataSize = 
-        getInputDataSizeInMB(conf, Gridmix.getGridmixInputDataPath(gridmixDir));
-    double ratio = ((double)actDataSize)/INPUT_SIZE;
-    long expDataSize = (long)(INPUT_SIZE * ratio);
-    Assert.assertEquals("Generated data has not matched with given size.", 
-                        expDataSize, actDataSize);
-  }
-
-  private void verifyJobStatus() throws IOException { 
-    JobClient jobClient = jtClient.getClient();
-    int len = jobClient.getAllJobs().length;
-    LOG.info("Verify the job status after completion of job...");
-    Assert.assertEquals("Job has not succeeded.", JobStatus.SUCCEEDED, 
-                        jobClient.getAllJobs()[len -1].getRunState());
-  }
-
-  private long getInputDataSizeInMB(Configuration conf, Path inputDir) 
-      throws IOException { 
-    FileSystem fs = inputDir.getFileSystem(conf);
-    ContentSummary csmry = fs.getContentSummary(inputDir);
-    long dataSize = csmry.getLength();
-    dataSize = dataSize/(1024 * 1024);
-    return dataSize;
-  }
-
-  private List<String> getInputFiles(Configuration conf, Path inputDir) 
-      throws IOException {
-    FileSystem fs = inputDir.getFileSystem(conf);
-    FileStatus [] listStatus = fs.listStatus(inputDir);
-    List<String> files = new ArrayList<String>();
-    for (FileStatus fileStat : listStatus) {
-      files.add(getInputFile(fileStat, conf));
-    }
-    return files;
-  }
-
-  private String getInputFile(FileStatus fstatus, Configuration conf) 
-      throws IOException {
-    String fileName = null;
-    if (!fstatus.isDirectory()) {
-      fileName = fstatus.getPath().getName();
-    } else {
-      FileSystem fs = fstatus.getPath().getFileSystem(conf);
-      FileStatus [] listStatus = fs.listStatus(fstatus.getPath());
-      for (FileStatus fileStat : listStatus) {
-         return getInputFile(fileStat, conf);
-      }
-    }
-    return fileName;
-  }
-}
-
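The verification above treats a ".gz" or ".tgz" suffix in a generated file name as evidence that compression emulation produced compressed input. A minimal standalone sketch of that check, using made-up file names; the deleted test walks the actual Gridmix input directory instead:

    java.util.List<String> names = java.util.Arrays.asList("datafile_0.gz", "datafile_1");
    for (String name : names) {
      boolean looksCompressed = name.contains(".gz") || name.contains(".tgz");
      System.out.println(name + (looksCompressed ? " looks compressed" : " looks uncompressed"));
    }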
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java
deleted file mode 100644
index 3fdd16d..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the gridmix jobs' map input, map output and reduce output
- * compression ratios against the default and user-specified
- * compression ratios.
- *
- */
-public class TestGridmixCompressionEmulationWithCompressInput 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(
-              "TestGridmixCompressionEmulationWithCompressInput.class");
-  final long inputSizeInMB = 1024 * 6;
-
-  /**
-   * Generate compressed input data and verify the map input, 
-   * map output and reduce output compression ratios of gridmix jobs 
-   * against the default compression ratios. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixCompressionRatiosAgainstDefaultCompressionRatio() 
-      throws Exception { 
-    final String tracePath = getTraceFile("compression_case1_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = { 
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Verify map input, map output and  reduce output compression ratios of
-   * gridmix jobs against user specified compression ratios. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixOuputCompressionRatiosAgainstCustomRatios() 
-      throws Exception { 
-    final String tracePath = getTraceFile("compression_case1_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
-
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
-        "-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.68",
-        "-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.35",
-        "-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.40"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java
deleted file mode 100644
index e6c7e6a..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the Gridmix emulation of HDFS private distributed cache file.
- */
-public class TestGridmixEmulationOfHDFSPrivateDCFile 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixEmulationOfHDFSPrivateDCFile.class");
-  /**
-   * Generate input data and a single HDFS private distributed cache 
-   * file based on the given input trace. Verify the Gridmix emulation of 
-   * a single private HDFS distributed cache file in RoundRobinUserResolver 
-   * mode with STRESS submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulateOfHDFSPrivateDCFile() 
-      throws Exception {
-    final long inputSizeInMB = 8192;
-    final String tracePath = getTraceFile("distcache_case3_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-        "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  /**
-   * Verify the Gridmix emulation of single HDFS private distributed 
-   * cache file in SubmitterUserResolver mode with REPLAY submission 
-   * policy by using the existing input data and HDFS private 
-   * distributed cache file.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixEmulationOfHDFSPrivateDCFile() 
-      throws Exception {
-    final String tracePath = getTraceFile("distcache_case3_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues ={"LOADJOB",
-                                    SubmitterUserResolver.class.getName(),
-                                    "REPLAY",
-                                    tracePath};
-    final String [] otherArgs = {
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java
deleted file mode 100644
index 0bf07fd..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the Gridmix emulation of HDFS public distributed cache file.
- */
-public class TestGridmixEmulationOfHDFSPublicDCFile 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixEmulationOfHDFSPublicDCFile.class");
-
-  /**
-   * Generate the input data and HDFS distributed cache file based 
-   * on given input trace. Verify the Gridmix emulation of single HDFS
-   * public distributed cache file in SubmitterUserResolver mode with 
-   * STRESS submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulationOfSingleHDFSDCFile() 
-      throws Exception { 
-    final long inputSizeInMB = 7168;
-    final String tracePath = getTraceFile("distcache_case1_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     inputSizeInMB + "m",
-                                     tracePath};
-
-    final String [] otherArgs = { 
-      "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-      "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-      "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Verify the Gridmix emulation of Single HDFS public distributed cache
-   * file in RoundRobinUserResolver mode with REPLAY submission policy 
-   * by using the existing input data and HDFS public distributed cache file. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixEmulationOfSingleHDFSPublicDCFile() 
-      throws Exception {
-    final String tracePath = getTraceFile("distcache_case1_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = 
-                     { "LOADJOB",
-                       RoundRobinUserResolver.class.getName(),
-                       "REPLAY",
-                       "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                       tracePath};
-
-    final String [] otherArgs = {
-       "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-       "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java
deleted file mode 100644
index b5d821f..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.GridmixJob;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run the {@link Gridmix} with a high ram jobs trace and 
- * verify whether each {@link Gridmix} job honors the high ram or not.
- * In the trace the jobs should use the high ram for both maps and reduces.
- */
-public class TestGridmixEmulationOfHighRamJobsCase1 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixEmulationOfHighRamJobsCase1.class");
-
- /**
-   * Generate input data and run {@link Gridmix} with a high ram jobs trace 
-   * as a load job and STRESS submission policy in a SubmitterUserResolver 
-   * mode. Verify whether each {@link Gridmix} job honors the high ram or not
-   * after completion of execution. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHighRamForMapsAndReducesOfMRJobs() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 400;
-    String tracePath = getTraceFile("highram_mr_jobs_case1");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "STRESS",
-                               inputSizeInMB + "m",
-                               tracePath};
-
-    String [] otherArgs = {
-               "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-               "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", 
-               "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};
-
-    validateTaskMemoryParamters(tracePath, true);
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java
deleted file mode 100644
index bfca1f2..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.GridmixJob;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run the {@link Gridmix} with a high ram jobs trace and 
- * verify whether each {@link Gridmix} job honors the high ram or not.
- * In the trace the jobs should use the high ram only for maps.
- */
-public class TestGridmixEmulationOfHighRamJobsCase2 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixEmulationOfHighRamJobsCase2.class");
-
- /**
-   * Generate input data and run {@link Gridmix} with a high ram jobs trace 
-   * as a load job and REPLAY submission policy in a RoundRobinUserResolver 
-   * mode. Verify whether each {@link Gridmix} job honors the high ram or not
-   * after completion of execution. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHighRamForMapsOfMRJobs() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("highram_mr_jobs_case2");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = 
-               {"LOADJOB",
-                RoundRobinUserResolver.class.getName(),
-                "REPLAY",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                tracePath};
-
-    String [] otherArgs = {
-               "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-               "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", 
-               "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};
-
-    validateTaskMemoryParamters(tracePath, true);
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java
deleted file mode 100644
index bc5e3aa..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.GridmixJob;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run the {@link Gridmix} with a high ram jobs trace and 
- * verify whether each {@link Gridmix} job honors the high ram or not.
- * In the trace the jobs should use the high ram only for reducers.
- */
-public class TestGridmixEmulationOfHighRamJobsCase3 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixEmulationOfHighRamJobsCase3.class);
-
- /**
-   * Generate input data and run {@link Gridmix} with a high ram jobs trace 
-   * as a load job and SERIAL submission policy in a SubmitterUserResolver 
-   * mode. Verify whether each {@link Gridmix} job honors the 
-   * high ram or not after completion of execution. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHighRamForReducersOfMRJobs() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 250;
-    String tracePath = getTraceFile("highram_mr_jobs_case3");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "SERIAL",
-                               inputSizeInMB + "m",
-                               tracePath};
-
-    String [] otherArgs = {
-               "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-               "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", 
-               "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};
-
-    validateTaskMemoryParamters(tracePath, true);
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java
deleted file mode 100644
index 5f464ce..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the Gridmix emulation of Multiple HDFS private distributed 
- * cache files.
- */
-public class TestGridmixEmulationOfMultipleHDFSPrivateDCFiles 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(
-          "TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.class");
-
-  /**
-   * Generate input data and multiple HDFS private distributed cache 
-   * files based on the given input trace. Verify the Gridmix emulation of 
-   * multiple private HDFS distributed cache files in RoundRobinUserResolver 
-   * mode with SERIAL submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulationOfMultipleHDFSPrivateDCFiles() 
-      throws Exception {
-    final long inputSize = 6144;
-    final String tracePath = getTraceFile("distcache_case4_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "SERIAL",
-                      inputSize+"m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-    final String [] otherArgs = {
-        "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-
-  /**
-   * Verify the Gridmix emulation of multiple HDFS private distributed 
-   * cache files in SubmitterUserResolver mode with STRESS submission 
-   * policy by using the existing input data and HDFS private 
-   * distributed cache files.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixEmulationOfMultipleHDFSPrivateDCFiles() 
-      throws Exception {
-    final String tracePath = getTraceFile("distcache_case4_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     tracePath};
-    final String [] otherArgs = {
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java
deleted file mode 100644
index cca5da8..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-import java.io.IOException;
-
-/**
- * Verify the Gridmix emulation of Multiple HDFS public distributed 
- * cache files.
- */
-public class TestGridmixEmulationOfMultipleHDFSPublicDCFiles 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(
-          "TestGridmixEmulationOfMultipleHDFSPublicDCFiles.class");
-
-  /**
-   * Generate the compressed input data and dist cache files based 
-   * on input trace. Verify the Gridmix emulation of
-   * multiple HDFS public distributed cache files.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulationOfMultipleHDFSDCFiles() 
-      throws Exception  {
-    final long inputSizeInMB = 7168;
-    final String tracePath = getTraceFile("distcache_case2_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = { 
-       "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-       "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-       "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Verify the Gridmix emulation of multiple HDFS public distributed cache 
-   * files by using existing compressed input data and HDFS dist cache files. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixEmulationOfMulitpleHDFSPublicDCFile() 
-      throws Exception {
-    final String tracePath = getTraceFile("distcache_case2_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "SERIAL",
-                                     tracePath};
-
-    final String [] otherArgs = {
-      "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-      "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs,  tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java
deleted file mode 100644
index ec11a2b..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-
-/**
- * Run the Gridmix with 10 minutes MR jobs trace and 
- * verify each job history against the corresponding job story 
- * in a given trace file.
- */
-public class TestGridmixWith10minTrace extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith10minTrace.class);
-
-  /**
-   * Generate data and run gridmix by sleep jobs with SERIAL submission 
-   * policy in a RoundRobinUserResolver mode against 10 minutes trace file.
-   * Verify each Gridmix job history with a corresponding job story 
-   * in a trace file after completion of all the jobs' execution.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith10minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 250;
-    final long minFileSize = 200 * 1024 * 1024;
-    String [] runtimeValues =
-               {"SLEEPJOB",
-                RoundRobinUserResolver.class.getName(),
-                "SERIAL",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                map.get("10m")};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize,
-        "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=false",
-        "-D", GridMixConfig.GRIDMIX_SLEEPJOB_MAPTASK_ONLY + "=true",
-        "-D", GridMixConfig.GRIDMIX_SLEEP_MAP_MAX_TIME + "=10"
-    };
-    String tracePath = map.get("10m");
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java
deleted file mode 100644
index 9bcb45a..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-
-/**
- * Run the Gridmix with 12 minutes MR job traces and 
- * verify each job history against the corresponding job story 
- * in a given trace file.
- */
-public class TestGridmixWith12minTrace extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith12minTrace.class);
- 
-  /**
-   * Generate data and run gridmix sleep jobs with REPLAY submission 
-   * policy in a SubmitterUserResolver mode against 12 minutes trace file.
-   * Verify each Gridmix job history with a corresponding job story 
-   * in a trace file after completion of all the jobs' execution.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith12minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 150;
-    String [] runtimeValues = {"SLEEPJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "REPLAY",
-                               inputSizeInMB + "m",
-                               map.get("12m")};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_SLEEP_MAP_MAX_TIME + "=10",
-        "-D", GridMixConfig.GRIDMIX_SLEEP_REDUCE_MAX_TIME + "=5"
-    };
-
-    String tracePath = map.get("12m");
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java
deleted file mode 100644
index c583e6d..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.junit.Test;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-
-/**
- * Run the Gridmix with 1 minute MR jobs trace and 
- * verify each job history against the corresponding job story 
- * in a given trace file.
- */
-public class TestGridmixWith1minTrace extends GridmixSystemTestCase{
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith1minTrace.class);
-
-  /**
-   * Generate data and run gridmix by load job with STRESS submission policy
-   * in a SubmitterUserResolver mode against 1 minute trace file. 
-   * Verify each Gridmix job history with a corresponding job story in the 
-   * trace after completion of all the jobs' execution.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith1minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 400;
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "STRESS",
-                               inputSizeInMB + "m",
-                               map.get("1m")};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    String tracePath = map.get("1m");
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java
deleted file mode 100644
index d9fb7c7..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run the Gridmix with 2 minutes job trace which has been generated with 
- * streaming jobs histories and verify each job history against 
- * the corresponding job story in a given trace file.
- */
-public class TestGridmixWith2minStreamingJobTrace 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixWith2minStreamingJobTrace.class");
-
-  /**
-   * Generate input data and run Gridmix by load job with STRESS submission 
-   * policy in a SubmitterUserResolver mode against 2 minutes job 
-   * trace file of streaming jobs. Verify each Gridmix job history with 
-   * a corresponding job story in a trace file after completion of all 
-   * the jobs' execution.  
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith2minStreamJobTrace() throws Exception {
-    final long inputSizeInMB = cSize * 250;
-    final long minFileSize = 150 * 1024 * 1024;
-    String tracePath = getTraceFile("2m_stream");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "STRESS",
-                               inputSizeInMB + "m",
-                               tracePath};
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true",
-        "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize,
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java
deleted file mode 100644
index 85dedf6..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Run the Gridmix with 3 minutes job trace which has been generated with 
- * streaming jobs histories and verify each job history against 
- * corresponding job story in a given trace file.
- */
-public class TestGridmixWith3minStreamingJobTrace 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-     LogFactory.getLog("TestGridmixWith3minStreamingJobTrace.class");
-
-  /**
-   * Generate input data and run gridmix by load job with REPLAY submission 
-   * policy in a RoundRobinUserResolver mode against 3 minutes job trace file 
-   * of streaming jobs. Verify each gridmix job history with a corresponding 
-   * job story in a trace file after completion of all the jobs' execution.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith3minStreamJobTrace() throws Exception {
-    final long inputSizeInMB = cSize * 200;
-    final long bytesPerFile = 150 * 1024 * 1024;
-    String tracePath = getTraceFile("3m_stream");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = 
-               {"LOADJOB",
-                RoundRobinUserResolver.class.getName(),
-                "REPLAY",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                tracePath};
-
-    String [] otherArgs = { 
-        "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true",
-        "-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile,
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java
deleted file mode 100644
index 5f2171f..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-
-/**
- * Run the Gridmix with 3 minutes MR jobs trace and 
- * verify each job history against the corresponding job story 
- * in a given trace file.
- */
-public class TestGridmixWith3minTrace extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith3minTrace.class);
-
-  /**
-   * Generate data and run gridmix by load job with REPLAY submission 
-   * policy in a RoundRobinUserResolver mode by using 3 minutes trace file. 
-   * Verify each Gridmix job history with a corresponding job story in 
-   * a trace after completion of all the jobs' execution.  
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith3minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 200;
-    String [] runtimeValues = 
-              {"LOADJOB",
-               RoundRobinUserResolver.class.getName(),
-               "REPLAY",
-               inputSizeInMB + "m",
-               "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-               map.get("3m")};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    String tracePath = map.get("3m");
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);  
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java
deleted file mode 100644
index ef1878c..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run the Gridmix with 5 minutes job trace which has been generated with 
- * streaming jobs histories and verify each job history against 
- * corresponding job story in a given trace file.
- */
-public class TestGridmixWith5minStreamingJobTrace 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixWith5minStreamingJobTrace.class");
-
-  /**
-   * Generate input data and run gridmix by load job with SERIAL submission 
-   * policy in a SubmitterUserResolver mode against 5 minutes job trace file 
-   * of streaming jobs. Verify each gridmix job history with a corresponding 
-   * job story in a trace file after completion of all the jobs' execution.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith5minStreamJobTrace() throws Exception {
-    String tracePath = getTraceFile("5m_stream");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final long inputSizeInMB = cSize * 200;
-    final long bytesPerFile = 150 * 1024 * 1024;
-    String [] runtimeValues = {"LOADJOB", 
-                              SubmitterUserResolver.class.getName(), 
-                              "SERIAL", 
-                              inputSizeInMB + "m",
-                              tracePath};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_KEY_FRC + "=0.5f",
-        "-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile,
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java
deleted file mode 100644
index c55167e..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-
-/**
- * Run the Gridmix with 5 minutes MR jobs trace and 
- * verify each job history against the corresponding job story 
- * in a given trace file.
- */
-public class TestGridmixWith5minTrace extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith5minTrace.class);
-
-  /**
-   * Generate data and run gridmix by load job with SERIAL submission 
-   * policy in a SubmitterUserResolver mode against 5 minutes trace file. 
-   * Verify each Gridmix job history with a corresponding job story 
-   * in a trace file after completion of all the jobs.  
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith5minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    final long minFileSize = 100 * 1024 * 1024;
-    String [] runtimeValues ={"LOADJOB", 
-                              SubmitterUserResolver.class.getName(), 
-                              "SERIAL", 
-                              inputSizeInMB + "m", 
-                              map.get("5m")};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize
-    };
-
-    String tracePath = map.get("5m");
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java
deleted file mode 100644
index 55be37b..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-
-/**
- * Run the Gridmix with 7 minutes MR jobs trace and 
- * verify each job history against the corresponding job story 
- * in a given trace file.
- */
-public class TestGridmixWith7minTrace extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith7minTrace.class);
-
-  /**
-   * Generate data and run gridmix by sleep job with STRESS submission 
-   * policy in a SubmitterUserResolver mode against 7 minute trace file.
-   * Verify each Gridmix job history with a corresponding job story 
-   * in a trace file after completion of all the jobs' execution.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith7minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 400;
-    final long minFileSize = 200 * 1024 * 1024;
-    String [] runtimeValues ={"SLEEPJOB",
-                              SubmitterUserResolver.class.getName(),
-                              "STRESS",
-                              inputSizeInMB + "m",
-                              map.get("7m")};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize,
-        "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=false"
-    };
-    String tracePath = map.get("7m");
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithCustomIntrvl.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithCustomIntrvl.java
deleted file mode 100644
index a82e806..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithCustomIntrvl.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} memory emulation feature for the jobs with 
- * custom progress interval, different input data, submission policies
- * and user resolver modes. Verify the total heap usage of map and reduce
- * tasks of the jobs with the corresponding original jobs in the trace. 
- */
-public class TestMemEmulForMapsAndReducesWithCustomIntrvl 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestMemEmulForMapsAndReducesWithCustomIntrvl.class");
-  /**
-   * Generate compressed input and run {@link Gridmix} by turning on the
-   * memory emulation with custom progress interval. The {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
-   * Verify maps and reduces total heap memory usage of {@link Gridmix} jobs 
-   * with corresponding original job in the trace. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForReducesWithCompressedInputCase7() 
-      throws Exception { 
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              RoundRobinUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-              tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + 
-                GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.3F",
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on the
-   * memory emulation with custom progress interval. The {@link Gridmix}
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : REPLAY, User Resolver Mode : SubmitterUserResolver
-   * Verify maps and reduces total heap memory usage of {@link Gridmix} jobs 
-   * with corresponding original job in the trace. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForReducesWithUncompressedInputCase8()
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                SubmitterUserResolver.class.getName(), 
-                "REPLAY", 
-                inputSizeInMB + "m",
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.2F",
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-            GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithDefaultIntrvl.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithDefaultIntrvl.java
deleted file mode 100644
index e1f211a..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithDefaultIntrvl.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} memory emulation feature for gridmix jobs
- * with default progress interval, different input data, submission 
- * policies and user resolver modes. Verify the total heap usage of
- * map and reduce tasks of the jobs with corresponding original
- * job in the trace. 
- */
-public class TestMemEmulForMapsAndReducesWithDefaultIntrvl 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestMemEmulForMapsAndReducesWithDefaultIntrvl.class");
-
-  /**
-   * Generate compressed input and run {@link Gridmix} by turning on the
-   * memory emulation with default progress interval. The {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
-   * Verify maps and reduces total heap memory usage of {@link Gridmix} jobs 
-   * with corresponding original job in the trace. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForReducesWithCompressedInputCase5() 
-      throws Exception { 
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              RoundRobinUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-              tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + 
-                GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on the
-   * memory emulation with default progress interval. The {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs. 
-   * Submission Policy : REPLAY, User Resolver Mode : SubmitterUserResolver
-   * Verify maps and reduces total heap memory usage of {@link Gridmix} jobs 
-   * with corresponding original job in the trace. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForReducesWithUncompressedInputCase6()
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                SubmitterUserResolver.class.getName(), 
-                "REPLAY", 
-                inputSizeInMB + "m",
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-            GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomHeapMemoryRatio.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomHeapMemoryRatio.java
deleted file mode 100644
index da48ad4..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomHeapMemoryRatio.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs 
- * with default progress interval, custom heap memory ratio, different input 
- * data, submission policies and user resolver modes. Verify the total heap 
- * usage of map and reduce tasks of the jobs with the corresponding original job 
- * in the trace. 
- */
-public class TestMemEmulForMapsWithCustomHeapMemoryRatio 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestMemEmulForMapsWithCustomHeapMemoryRatio.class");
-
-  /**
-   * Generate compressed input and run {@link Gridmix} by turning on the
-   * memory emulation. The {@link Gridmix} should use the following runtime 
-   * parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : SubmitterUserResolver
-   * Verify total heap memory usage of the tasks of {@link Gridmix} jobs with 
-   * corresponding original job in the trace. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithCompressedInputCase1() 
-     throws Exception {
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              SubmitterUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "="  +
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO + "=0.5F"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on the
-   * memory emulation. The {@link Gridmix} should use the following runtime 
-   * parameters while running the jobs.
-   *  Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
-   * Verify total heap memory usage of tasks of {@link Gridmix} jobs with 
-   * corresponding original job in the trace. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithUncompressedInputCase2() 
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                RoundRobinUserResolver.class.getName(), 
-                "STRESS",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +  
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO + "=0.4F"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-            GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomIntrvl.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomIntrvl.java
deleted file mode 100644
index 5d1d452..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomIntrvl.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs 
- * with custom progress interval, different input data, submission policies 
- * and user resolver modes. Verify the total heap usage of map tasks of
- * the jobs with the corresponding original job in the trace. 
- */
-public class TestMemEmulForMapsWithCustomIntrvl extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestMemEmulForMapsWithCustomIntrvl.class");
-
-  /**
-   * Generate compressed input and run {@link Gridmix} by turning on the
-   * memory emulation with custom progress interval. The {@link Gridmix}
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : SubmitterUserResolver
-   * Verify maps total heap memory usage of {@link Gridmix} jobs with 
-   * corresponding original job in the trace. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithCompressedInputCase3() 
-     throws Exception {
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("mem_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              SubmitterUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "="  +
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.2F",
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on the
-   * memory emulation with custom progress interval. The {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs.
-   *  Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
-   * Verify maps total heap memory usage of {@link Gridmix} jobs with 
-   * corresponding original job in the trace. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithUncompressedInputCase4() 
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("mem_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                RoundRobinUserResolver.class.getName(), 
-                "STRESS",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +  
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.3F",
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-            GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithDefaultIntrvl.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithDefaultIntrvl.java
deleted file mode 100644
index ff136b8..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithDefaultIntrvl.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs 
- * with default progress interval, different input data, submission policies 
- * and user resolver modes. Verify the total heap usage of map tasks of the 
- * jobs with corresponding original job in the trace. 
- */
-public class TestMemEmulForMapsWithDefaultIntrvl extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-          LogFactory.getLog("TestMemEmulForMapsWithDefaultIntrvl.class");
-
-  /**
-   * Generate compressed input and run {@link Gridmix} by turning on the
-   * memory emulation with default progress interval. The {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : SubmitterUserResolver
-   * Verify maps total heap memory usage of {@link Gridmix} jobs with 
-   * corresponding original job in the trace. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithCompressedInputCase1() 
-     throws Exception {
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("mem_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              SubmitterUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "="  +
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on the
-   * memory emulation with default progress interval. The {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs.
-   *  Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
-   * Verify maps total heap memory usage of {@link Gridmix} jobs with 
-   * corresponding original job in the trace. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithUncompressedInputCase2() 
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("mem_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                RoundRobinUserResolver.class.getName(), 
-                "STRESS",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +  
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-            GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java
deleted file mode 100644
index fc99162..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java
+++ /dev/null
@@ -1,285 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-
-import org.apache.hadoop.mapred.gridmix.Gridmix;
-import org.apache.hadoop.mapred.gridmix.JobCreator;
-import org.apache.hadoop.mapred.gridmix.SleepJob;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.*;
-
-/**
- * Gridmix system tests configurations. 
- */
-public class GridMixConfig {
-
-  /**
-   *  Gridmix original job id.
-   */
-  public static final String GRIDMIX_ORIGINAL_JOB_ID = Gridmix.ORIGINAL_JOB_ID;
-
-  /**
-   *  Gridmix output directory.
-   */
-  public static final String GRIDMIX_OUTPUT_DIR = Gridmix.GRIDMIX_OUT_DIR; 
-
-  /**
-   * Gridmix job type (LOADJOB/SLEEPJOB).
-   */
-  public static final String GRIDMIX_JOB_TYPE = JobCreator.GRIDMIX_JOB_TYPE;
-
-  /**
-   *  Gridmix submission use queue.
-   */
-  /* The following properties are package-private in the Gridmix
-  package and are not visible outside it. However, the system tests
-  need them, so they are redefined in this system-test config file. */
-  public static final String GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE = 
-      "gridmix.job-submission.use-queue-in-trace";
-  
-  /**
-   *  Gridmix user resolver(RoundRobinUserResolver/
-   *  SubmitterUserResolver/EchoUserResolver).
-   */
-  public static final String GRIDMIX_USER_RESOLVER = Gridmix.GRIDMIX_USR_RSV;
-
-  /**
-   *  Gridmix queue depth.
-   */
-  public static final String GRIDMIX_QUEUE_DEPTH = Gridmix.GRIDMIX_QUE_DEP;
-
-  /* The following property is package-private in the Gridmix
-  package and is not available outside it. However, the system
-  tests need it, so it is redefined in this system-test config file. */
-  /**
-   * Gridmix generate bytes per file.
-   */
-  public static final String GRIDMIX_BYTES_PER_FILE = 
-      "gridmix.gen.bytes.per.file";
-  
-  /**
-   *  Gridmix job submission policy(STRESS/REPLAY/SERIAL).
-   */
-
-  public static final String GRIDMIX_SUBMISSION_POLICY =
-      "gridmix.job-submission.policy";
-
-  /**
-   *  Gridmix minimum file size.
-   */
-  public static final String GRIDMIX_MINIMUM_FILE_SIZE =
-      "gridmix.min.file.size";
-
-  /**
-   * Gridmix key fraction.
-   */
-  public static final String GRIDMIX_KEY_FRC = 
-      "gridmix.key.fraction";
-
-  /**
-   * Gridmix compression enable
-   */
-  public static final String GRIDMIX_COMPRESSION_ENABLE =
-      "gridmix.compression-emulation.enable";
-  /**
-   * Gridmix distcache enable
-   */
-  public static final String GRIDMIX_DISTCACHE_ENABLE = 
-      "gridmix.distributed-cache-emulation.enable";
-
-  /**
-   * Gridmix input decompression enable.
-   */
-  public static final String GRIDMIX_INPUT_DECOMPRESS_ENABLE = 
-    "gridmix.compression-emulation.input-decompression.enable";
-
-  /**
-   * Gridmix input compression ratio.
-   */
-  public static final String GRIDMIX_INPUT_COMPRESS_RATIO = 
-    "gridmix.compression-emulation.map-input.decompression-ratio";
-
-  /**
-   * Gridmix intermediate compression ratio.
-   */
-  public static final String GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO = 
-    "gridmix.compression-emulation.map-output.compression-ratio";
-
-  /**
-   * Gridmix output compression ratio.
-   */
-  public static final String GRIDMIX_OUTPUT_COMPRESSION_RATIO = 
-      "gridmix.compression-emulation.reduce-output.compression-ratio";
-
-  /**
-   * Gridmix distributed cache visibilities.
-   */
-  public static final String GRIDMIX_DISTCACHE_VISIBILITIES = 
-      MRJobConfig.CACHE_FILE_VISIBILITIES;
-
-  /**
-   * Gridmix distributed cache files.
-   */
-  public static final String GRIDMIX_DISTCACHE_FILES = 
-      MRJobConfig.CACHE_FILES;
-  
-  /**
-   * Gridmix distributed cache files size.
-   */
-  public static final String GRIDMIX_DISTCACHE_FILESSIZE = 
-      MRJobConfig.CACHE_FILES_SIZES;
-
-  /**
-   * Gridmix distributed cache files time stamp.
-   */
-  public static final String GRIDMIX_DISTCACHE_TIMESTAMP =
-      MRJobConfig.CACHE_FILE_TIMESTAMPS;
-
-  /**
-   *  Gridmix logger mode.
-   */
-  public static final String GRIDMIX_LOG_MODE =
-      "log4j.logger.org.apache.hadoop.mapred.gridmix";
-
-  /**
-   * Gridmix sleep job map task only.
-   */
-  public static final String GRIDMIX_SLEEPJOB_MAPTASK_ONLY = 
-      SleepJob.SLEEPJOB_MAPTASK_ONLY;
-
-  /**
-   * Gridmix sleep map maximum time.
-   */
-  public static final String GRIDMIX_SLEEP_MAP_MAX_TIME = 
-      SleepJob.GRIDMIX_SLEEP_MAX_MAP_TIME;
-
-  /**
-   * Gridmix sleep reduce maximum time.
-   */
-  public static final String GRIDMIX_SLEEP_REDUCE_MAX_TIME = 
-      SleepJob.GRIDMIX_SLEEP_MAX_REDUCE_TIME;
-
-  /**
-   * Gridmix high ram job emulation enable.
-   */
-  public static final String GRIDMIX_HIGH_RAM_JOB_ENABLE = 
-      "gridmix.highram-emulation.enable";
-
-  /**
-   * Job map memory in mb.
-   */
-  public static final String JOB_MAP_MEMORY_MB = 
-      MRJobConfig.MAP_MEMORY_MB;
-
-  /**
-   * Job reduce memory in mb.
-   */
-  public static final String JOB_REDUCE_MEMORY_MB = 
-      MRJobConfig.REDUCE_MEMORY_MB;
-
-  /**
-   * Cluster map memory in mb. 
-   */
-  public static final String CLUSTER_MAP_MEMORY = 
-      MRConfig.MAPMEMORY_MB;
-
-  /**
-   * Cluster reduce memory in mb.
-   */
-  public static final String CLUSTER_REDUCE_MEMORY = 
-      MRConfig.REDUCEMEMORY_MB;
-
-  /**
-   * Cluster maximum map memory.
-   */
-  public static final String CLUSTER_MAX_MAP_MEMORY = 
-      JTConfig.JT_MAX_MAPMEMORY_MB;
-
-  /**
-   * Cluster maximum reduce memory.
-   */
-  public static final String CLUSTER_MAX_REDUCE_MEMORY = 
-      JTConfig.JT_MAX_REDUCEMEMORY_MB;
-
- /**
-  * Gridmix cpu emulation.
-  */
- public static final String GRIDMIX_CPU_EMULATON =
-     ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS;
-
- /**
-  *  Gridmix cpu usage emulation plugin.
-  */
- public  static final String GRIDMIX_CPU_USAGE_PLUGIN =
-     CumulativeCpuUsageEmulatorPlugin.class.getName();
-
- /**
-  * Gridmix cpu emulation custom interval.
-  */
- public static final String GRIDMIX_CPU_CUSTOM_INTERVAL =
-     CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL;
-
- /**
-  * Gridmix cpu emulation lower limit.
-  */
- public static int GRIDMIX_CPU_EMULATION_LOWER_LIMIT = 55;
-
- /**
-  * Gridmix cpu emulation upper limit.
-  */
- public static int GRIDMIX_CPU_EMULATION_UPPER_LIMIT = 130;
-
- /**
-  * Gridmix heap memory custom interval
-  */
- public static final String GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL = 
-     TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL;
-  
- /**
-  *  Gridmix heap free memory ratio
-  */
- public static final String GRIDMIX_HEAP_FREE_MEMORY_RATIO =
-     TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO;
-  
- /**
-  *  Gridmix memory emulation plugin
-  */
- public static final String GRIDMIX_MEMORY_EMULATION_PLUGIN = 
-     TotalHeapUsageEmulatorPlugin.class.getName();
-  
- /**
-  *  Gridmix memory emulation
-  */
- public static final String GRIDMIX_MEMORY_EMULATON = 
-     ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS;
-  
- /**
-  *  Gridmix memory emulation lower limit.
-  */
- public static int GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT = 55;
-  
- /**
-  * Gridmix memory emulation upper limit. 
-  */
- public static int GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT = 130;
-
-}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java
deleted file mode 100644
index 0abfc5c..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-/**
- * Gridmix run modes. 
- *
- */
-public enum GridMixRunMode {
-   DATA_GENERATION(1), RUN_GRIDMIX(2), DATA_GENERATION_AND_RUN_GRIDMIX(3);
-   private int mode;
-
-   GridMixRunMode (int mode) {
-      this.mode = mode;
-   }
-   
-   public int getValue() {
-     return mode;
-   }
-}
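The tests above always hand this enum around as its integer value, for example GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue() when calling runGridmixAndVerify. A minimal sketch of the reverse lookup follows; the fromValue helper is hypothetical and not part of the deleted class:

    // Hypothetical helper (not in GridMixRunMode): map the integer value the
    // tests pass around back to its enum constant.
    static GridMixRunMode fromValue(int value) {
      for (GridMixRunMode mode : GridMixRunMode.values()) {
        if (mode.getValue() == value) {
          return mode;
        }
      }
      throw new IllegalArgumentException("Unknown Gridmix run mode: " + value);
    }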
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java
deleted file mode 100644
index ad00f0d..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.tools.rumen.ZombieJobProducer;
-import org.apache.hadoop.tools.rumen.ZombieJob;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * Build the job stories with a given trace file. 
- */
-public class GridmixJobStory {
-  private static Log LOG = LogFactory.getLog(GridmixJobStory.class);
-  private Path path;
-  private Map<JobID, ZombieJob> zombieJobs;
-  private Configuration conf;
-  
-  public GridmixJobStory(Path path, Configuration conf) {
-    this.path = path;
-    this.conf = conf;
-    try {
-       zombieJobs = buildJobStories();
-       if(zombieJobs == null) {
-          throw new NullPointerException("No jobs found in the "
-              + "given trace file.");
-       }
-    } catch (IOException ioe) {
-      LOG.warn("Error:" + ioe.getMessage());
-    } catch (NullPointerException npe) {
-      LOG.warn("Error:" + npe.getMessage());
-    }
-  }
-  
-  /**
-   * Get the zombie jobs as a map.
-   * @return the zombie jobs map.
-   */
-  public Map<JobID, ZombieJob> getZombieJobs() {
-    return zombieJobs;
-  }
-  
-  /**
-   * Get the zombie job of a given job id.
-   * @param jobId - gridmix job id.
-   * @return - the zombie job object.
-   */
-  public ZombieJob getZombieJob(JobID jobId) {
-    return zombieJobs.get(jobId);
-  }
-  
-  private Map<JobID, ZombieJob> buildJobStories() throws IOException {
-    ZombieJobProducer zjp = new ZombieJobProducer(path,null, conf);
-    Map<JobID, ZombieJob> hm = new HashMap<JobID, ZombieJob>();
-    ZombieJob zj = zjp.getNextJob();
-    while (zj != null) {
-      hm.put(zj.getJobID(),zj);
-      zj = zjp.getNextJob();
-    }
-    if (hm.size() == 0) {
-      return null;
-    } else {
-      return hm;
-    }
-  }
-}
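A condensed sketch of how the verification code below consumes this class: build the job stories from the trace once, then resolve each simulated job back to its original ZombieJob via the job id Gridmix records in the simulated job's configuration. The tracePath, conf, and simuJobConf variables are assumed to be supplied by the test harness:

    GridmixJobStory story = new GridmixJobStory(tracePath, conf);
    // Gridmix stamps the original job id into every simulated job's
    // configuration; use it to look up the matching story from the trace.
    String origJobId = simuJobConf.get(GridMixConfig.GRIDMIX_ORIGINAL_JOB_ID);
    ZombieJob originalJob = story.getZombieJob(JobID.forName(origJobId));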
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java
deleted file mode 100644
index 6a5699e..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.test.system.JTClient;
-import org.junit.Assert;
-
-/**
- * Submit the gridmix jobs. 
- */
-public class GridmixJobSubmission {
-  private static final Log LOG = 
-      LogFactory.getLog(GridmixJobSubmission.class);
-  private int gridmixJobCount;
-  private Configuration conf;
-  private Path gridmixDir;
-  private JTClient jtClient;
-
-  public GridmixJobSubmission(Configuration conf, JTClient jtClient , 
-                              Path gridmixDir) { 
-    this.conf = conf;
-    this.jtClient = jtClient;
-    this.gridmixDir = gridmixDir;
-  }
-  
-  /**
-   * Submit the gridmix jobs.
-   * @param runtimeArgs - gridmix common runtime arguments.
-   * @param otherArgs - gridmix other runtime arguments.
-   * @param mode - gridmix run mode.
-   * @throws Exception
-   */
-  public void submitJobs(String [] runtimeArgs, 
-                         String [] otherArgs, int mode) throws Exception {
-    int prvJobCount = jtClient.getClient().getAllJobs().length;
-    int exitCode = -1;
-    if (otherArgs == null) {
-      exitCode = UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-                                               mode, runtimeArgs);
-    } else {
-      exitCode = UtilsForGridmix.runGridmixJob(gridmixDir, conf, mode,
-                                               runtimeArgs, otherArgs);
-    }
-    Assert.assertEquals("Gridmix jobs have failed.", 0 , exitCode);
-    gridmixJobCount = jtClient.getClient().getAllJobs().length - prvJobCount;
-  }
-
-  /**
-   * Get the submitted jobs count.
-   * @return the number of jobs submitted for a trace.
-   */
-  public int getGridmixJobCount() {
-     return gridmixJobCount;
-  }
-
-  /**
-   * Get the job configuration.
-   * @return Configuration of a submitted job.
-   */
-  public Configuration getJobConf() {
-    return conf;
-  }
-}
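Typical use from a system test, sketched under the assumption that the cluster Configuration, JTClient, Gridmix working directory, and argument arrays are already prepared by the harness:

    GridmixJobSubmission submission =
        new GridmixJobSubmission(conf, jtClient, gridmixDir);
    // Generate the input data and run the Gridmix jobs described by the trace.
    submission.submitJobs(runtimeValues, otherArgs,
        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
    int submittedJobs = submission.getGridmixJobCount();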
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java
deleted file mode 100644
index e448412..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java
+++ /dev/null
@@ -1,1166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-
-import java.io.IOException;
-import java.io.File;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.Collections;
-import java.util.Set;
-import java.util.ArrayList;
-import java.util.Arrays;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.TaskCounter;
-import org.apache.hadoop.mapreduce.Counters;
-import org.apache.hadoop.mapreduce.Counter;
-import org.apache.hadoop.mapreduce.CounterGroup;
-import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.test.system.JTClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.tools.rumen.LoggedJob;
-import org.apache.hadoop.tools.rumen.ZombieJob;
-import org.apache.hadoop.tools.rumen.TaskInfo;
-import org.junit.Assert;
-import java.text.ParseException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.mapred.gridmix.GridmixSystemTestCase;
-
-/**
- * Verifying each Gridmix job with corresponding job story in a trace file.
- */
-public class GridmixJobVerification {
-
-  private static Log LOG = LogFactory.getLog(GridmixJobVerification.class);
-  private Path path;
-  private Configuration conf;
-  private JTClient jtClient;
-  private String userResolverVal;
-  static final String origJobIdKey = GridMixConfig.GRIDMIX_ORIGINAL_JOB_ID;
-  static final String jobSubKey = GridMixConfig.GRIDMIX_SUBMISSION_POLICY;
-  static final String jobTypeKey = GridMixConfig.GRIDMIX_JOB_TYPE;
-  static final String mapTaskKey = GridMixConfig.GRIDMIX_SLEEPJOB_MAPTASK_ONLY;
-  static final String usrResolver = GridMixConfig.GRIDMIX_USER_RESOLVER;
-  static final String fileOutputFormatKey = FileOutputFormat.COMPRESS;
-  static final String fileInputFormatKey = FileInputFormat.INPUT_DIR;
-  static final String compEmulKey = GridMixConfig.GRIDMIX_COMPRESSION_ENABLE;
-  static final String inputDecompKey = 
-      GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE;
-  static final String mapInputCompRatio = 
-      GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO;
-  static final String mapOutputCompRatio = 
-      GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO;
-  static final String reduceOutputCompRatio = 
-      GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO;
-  private Map<String, List<JobConf>> simuAndOrigJobsInfo = 
-      new HashMap<String, List<JobConf>>();
-
-  /**
-   * Gridmix job verification constructor
-   * @param path - path of the gridmix output directory.
-   * @param conf - cluster configuration.
-   * @param jtClient - jobtracker client.
-   */
-  public GridmixJobVerification(Path path, Configuration conf, 
-     JTClient jtClient) {
-    this.path = path;
-    this.conf = conf;
-    this.jtClient = jtClient;
-  }
-  
-  /**
-   * It verifies the Gridmix jobs with corresponding job story in a trace file.
-   * @param jobids - gridmix job ids.
-   * @throws IOException - if an I/O error occurs.
-   * @throws ParseException - if a parse error occurs.
-   */
-  public void verifyGridmixJobsWithJobStories(List<JobID> jobids) 
-      throws Exception {
-
-    SortedMap <Long, String> origSubmissionTime = new TreeMap <Long, String>();
-    SortedMap <Long, String> simuSubmissionTime = new TreeMap<Long, String>();
-    GridmixJobStory gjs = new GridmixJobStory(path, conf);
-    final Iterator<JobID> ite = jobids.iterator();
-    File destFolder = new File(System.getProperty("java.io.tmpdir") 
-                              + "/gridmix-st/");
-    destFolder.mkdir();
-    while (ite.hasNext()) {
-      JobID simuJobId = ite.next();
-      JobHistoryParser.JobInfo jhInfo = getSimulatedJobHistory(simuJobId);
-      Assert.assertNotNull("Job history not found.", jhInfo);
-      Counters counters = jhInfo.getTotalCounters();
-      JobConf simuJobConf = getSimulatedJobConf(simuJobId, destFolder);
-      String origJobId = simuJobConf.get(origJobIdKey);
-      LOG.info("OriginalJobID<->CurrentJobID:" 
-              + origJobId + "<->" + simuJobId);
-
-      if (userResolverVal == null) {
-        userResolverVal = simuJobConf.get(usrResolver);
-      }
-      ZombieJob zombieJob = gjs.getZombieJob(JobID.forName(origJobId));
-      Map<String, Long> mapJobCounters = getJobMapCounters(zombieJob);
-      Map<String, Long> reduceJobCounters = getJobReduceCounters(zombieJob);
-      if (simuJobConf.get(jobSubKey).contains("REPLAY")) {
-          origSubmissionTime.put(zombieJob.getSubmissionTime(), 
-                                 origJobId.toString() + "^" + simuJobId); 
-          simuSubmissionTime.put(jhInfo.getSubmitTime() , 
-                                 origJobId.toString() + "^" + simuJobId);
-      }
-
-      LOG.info("Verifying the job <" + simuJobId + "> and wait for a while...");
-      verifySimulatedJobSummary(zombieJob, jhInfo, simuJobConf);
-      verifyJobMapCounters(counters, mapJobCounters, simuJobConf);
-      verifyJobReduceCounters(counters, reduceJobCounters, simuJobConf); 
-      verifyCompressionEmulation(zombieJob.getJobConf(), simuJobConf, counters, 
-                                 reduceJobCounters, mapJobCounters);
-      verifyDistributeCache(zombieJob,simuJobConf);
-      setJobDistributedCacheInfo(simuJobId.toString(), simuJobConf, 
-         zombieJob.getJobConf());
-      verifyHighRamMemoryJobs(zombieJob, simuJobConf);
-      verifyCPUEmulationOfJobs(zombieJob, jhInfo, simuJobConf);
-      verifyMemoryEmulationOfJobs(zombieJob, jhInfo, simuJobConf);
-      LOG.info("Done.");
-    }
-    verifyDistributedCacheBetweenJobs(simuAndOrigJobsInfo);
-  }
-
-  /**
-   * Verify the job submission order between the jobs in replay mode.
-   * @param origSubmissionTime - sorted map of original jobs submission times.
-   * @param simuSubmissionTime - sorted map of simulated jobs submission times.
-   */
-  public void verifyJobSumissionTime(SortedMap<Long, String> origSubmissionTime, 
-      SortedMap<Long, String> simuSubmissionTime) { 
-    Assert.assertEquals("Simulated job's submission time count has " 
-                     + "not match with Original job's submission time count.", 
-                     origSubmissionTime.size(), simuSubmissionTime.size());
-    for ( int index = 0; index < origSubmissionTime.size(); index ++) {
-        String origAndSimuJobID = origSubmissionTime.get(index);
-        String simuAndorigJobID = simuSubmissionTime.get(index);
-        Assert.assertEquals("Simulated jobs have not submitted in same " 
-                           + "order as original jobs submitted in REPLAY mode.", 
-                           origAndSimuJobID, simuAndorigJobID);
-    }
-  }
-
-  /**
-   * It verifies the simulated job map counters.
-   * @param counters - simulated job counters.
-   * @param mapCounters - original job map counters from the trace.
-   * @param jobConf - simulated job configuration.
-   * @throws ParseException - if a parser error occurs.
-   */
-  public void verifyJobMapCounters(Counters counters, 
-     Map<String,Long> mapCounters, JobConf jobConf) throws ParseException {
-    if (!jobConf.get(jobTypeKey, "LOADJOB").equals("SLEEPJOB")) {
-      Assert.assertEquals("Map input records have not matched.",
-                          mapCounters.get("MAP_INPUT_RECS").longValue(), 
-                          getCounterValue(counters, "MAP_INPUT_RECORDS"));
-    } else {
-      Assert.assertTrue("Map Input Bytes are zero", 
-                        getCounterValue(counters,"HDFS_BYTES_READ") != 0);
-      Assert.assertNotNull("Map Input Records are zero", 
-                           getCounterValue(counters, "MAP_INPUT_RECORDS")!=0);
-    }
-  }
-
-  /**
-   *  It verifies the simulated job reduce counters.
-   * @param counters - simulated job counters.
-   * @param reduceCounters - original job reduce counters from the trace.
-   * @param jobConf - simulated job configuration.
-   * @throws ParseException - if a parser error occurs.
-   */
-  public void verifyJobReduceCounters(Counters counters, 
-     Map<String,Long> reduceCounters, JobConf jobConf) throws ParseException {
-    if (jobConf.get(jobTypeKey, "LOADJOB").equals("SLEEPJOB")) {
-      Assert.assertTrue("Reduce output records are not zero for sleep job.",
-          getCounterValue(counters, "REDUCE_OUTPUT_RECORDS") == 0);
-      Assert.assertTrue("Reduce output bytes are not zero for sleep job.", 
-          getCounterValue(counters,"HDFS_BYTES_WRITTEN") == 0);
-    }
-  }
-
-  /**
-   * It verifies the gridmix simulated job summary.
-   * @param zombieJob - Original job summary.
-   * @param jhInfo  - Simulated job history info.
-   * @param jobConf - simulated job configuration.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void verifySimulatedJobSummary(ZombieJob zombieJob, 
-     JobHistoryParser.JobInfo jhInfo, JobConf jobConf) throws IOException {
-    Assert.assertEquals("Job id has not matched", zombieJob.getJobID(), 
-                        JobID.forName(jobConf.get(origJobIdKey)));
-
-    Assert.assertEquals("Job maps have not matched", zombieJob.getNumberMaps(),
-                        jhInfo.getTotalMaps());
-
-    if (!jobConf.getBoolean(mapTaskKey, false)) { 
-      Assert.assertEquals("Job reducers have not matched", 
-          zombieJob.getNumberReduces(), jhInfo.getTotalReduces());
-    } else {
-      Assert.assertEquals("Job reducers have not matched",
-                          0, jhInfo.getTotalReduces());
-    }
-
-    Assert.assertEquals("Job status has not matched.", 
-                        zombieJob.getOutcome().name(), 
-                        convertJobStatus(jhInfo.getJobStatus()));
-
-    LoggedJob loggedJob = zombieJob.getLoggedJob();
-    Assert.assertEquals("Job priority has not matched.", 
-                        loggedJob.getPriority().toString(), 
-                        jhInfo.getPriority());
-
-    if (jobConf.get(usrResolver).contains("RoundRobin")) {
-       String user = UserGroupInformation.getLoginUser().getShortUserName();
-       Assert.assertTrue(jhInfo.getJobId().toString() 
-                        + " has not impersonate with other user.", 
-                        !jhInfo.getUsername().equals(user));
-    }
-  }
-
-  /**
-   * Get the original job map counters from a trace.
-   * @param zombieJob - Original job story.
-   * @return - map counters as a map.
-   */
-  public Map<String, Long> getJobMapCounters(ZombieJob zombieJob) {
-    long expMapInputBytes = 0;
-    long expMapOutputBytes = 0;
-    long expMapInputRecs = 0;
-    long expMapOutputRecs = 0;
-    Map<String,Long> mapCounters = new HashMap<String,Long>();
-    for (int index = 0; index < zombieJob.getNumberMaps(); index ++) {
-      TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
-      expMapInputBytes += mapTask.getInputBytes();
-      expMapOutputBytes += mapTask.getOutputBytes();
-      expMapInputRecs += mapTask.getInputRecords();
-      expMapOutputRecs += mapTask.getOutputRecords();
-    }
-    mapCounters.put("MAP_INPUT_BYTES", expMapInputBytes);
-    mapCounters.put("MAP_OUTPUT_BYTES", expMapOutputBytes);
-    mapCounters.put("MAP_INPUT_RECS", expMapInputRecs);
-    mapCounters.put("MAP_OUTPUT_RECS", expMapOutputRecs);
-    return mapCounters;
-  }
-  
-  /**
-   * Get the original job reduce counters from a trace.
-   * @param zombieJob - Original job story.
-   * @return - reduce counters as a map.
-   */
-  public Map<String,Long> getJobReduceCounters(ZombieJob zombieJob) {
-    long expReduceInputBytes = 0;
-    long expReduceOutputBytes = 0;
-    long expReduceInputRecs = 0;
-    long expReduceOutputRecs = 0;
-    Map<String,Long> reduceCounters = new HashMap<String,Long>();
-    for (int index = 0; index < zombieJob.getNumberReduces(); index ++) {
-      TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
-      expReduceInputBytes += reduceTask.getInputBytes();
-      expReduceOutputBytes += reduceTask.getOutputBytes();
-      expReduceInputRecs += reduceTask.getInputRecords();
-      expReduceOutputRecs += reduceTask.getOutputRecords();
-    }
-    reduceCounters.put("REDUCE_INPUT_BYTES", expReduceInputBytes);
-    reduceCounters.put("REDUCE_OUTPUT_BYTES", expReduceOutputBytes);
-    reduceCounters.put("REDUCE_INPUT_RECS", expReduceInputRecs);
-    reduceCounters.put("REDUCE_OUTPUT_RECS", expReduceOutputRecs);
-    return reduceCounters;
-  }
-
-  /**
-   * Get the simulated job configuration of a job.
-   * @param simulatedJobID - Simulated job id.
-   * @param tmpJHFolder - temporary job history folder location.
-   * @return - simulated job configuration.
-   * @throws IOException - If an I/O error occurs.
-   */
-  public JobConf getSimulatedJobConf(JobID simulatedJobID, File tmpJHFolder) 
-      throws IOException{
-    FileSystem fs = null;
-    try {
-
-      String historyFilePath = 
-         jtClient.getProxy().getJobHistoryLocationForRetiredJob(simulatedJobID);
-      Path jhpath = new Path(historyFilePath);
-      fs = jhpath.getFileSystem(conf);
-      fs.copyToLocalFile(jhpath,new Path(tmpJHFolder.toString()));
-      String historyPath =
-          historyFilePath.substring(0,historyFilePath.lastIndexOf("_"));
-      fs.copyToLocalFile(new Path(historyPath + "_conf.xml"), 
-                         new Path(tmpJHFolder.toString()));
-      JobConf jobConf = new JobConf();
-      jobConf.addResource(new Path(tmpJHFolder.toString() 
-                         + "/" + simulatedJobID + "_conf.xml"));
-      jobConf.reloadConfiguration();
-      return jobConf;
-
-    } finally {
-      if (fs != null) {
-        fs.close();
-      }
-    }
-  }
-
-  /**
-   * Get the simulated job history of a job.
-   * @param simulatedJobID - simulated job id.
-   * @return - simulated job information.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public JobHistoryParser.JobInfo getSimulatedJobHistory(JobID simulatedJobID) 
-      throws IOException {
-    FileSystem fs = null;
-    try {
-      String historyFilePath = jtClient.getProxy().
-          getJobHistoryLocationForRetiredJob(simulatedJobID);
-      Path jhpath = new Path(historyFilePath);
-      fs = jhpath.getFileSystem(conf);
-      JobHistoryParser jhparser = new JobHistoryParser(fs, jhpath);
-      JobHistoryParser.JobInfo jhInfo = jhparser.parse();
-      return jhInfo;
-
-    } finally {
-      if (fs != null) {
-        fs.close();
-      }
-    }
-  }
-
-  /**
-   * It verifies the heap memory resource usage of gridmix jobs against
-   * the corresponding original jobs in the trace.
-   * @param zombieJob - Original job history.
-   * @param jhInfo - Simulated job history.
-   * @param simuJobConf - simulated job configuration.
-   */
-  public void verifyMemoryEmulationOfJobs(ZombieJob zombieJob,
-                 JobHistoryParser.JobInfo jhInfo,
-                                 JobConf simuJobConf) throws Exception {
-    long origJobMapsTHU = 0;
-    long origJobReducesTHU = 0;
-    long simuJobMapsTHU = 0;
-    long simuJobReducesTHU = 0;
-    boolean isMemEmulOn = false;
-    if (simuJobConf.get(GridMixConfig.GRIDMIX_MEMORY_EMULATON) != null) {
-      isMemEmulOn = 
-          simuJobConf.get(GridMixConfig.GRIDMIX_MEMORY_EMULATON).
-              contains(GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN);
-    }
-
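-    // When memory emulation is enabled, aggregate the heap usage of all map
-    // and reduce tasks from the trace and compare it with the simulated job's
-    // COMMITTED_HEAP_BYTES counters.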
-    if (isMemEmulOn) {
-      for (int index = 0; index < zombieJob.getNumberMaps(); index ++) {
-        TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
-        if (mapTask.getResourceUsageMetrics().getHeapUsage() > 0) {
-          origJobMapsTHU += 
-                  mapTask.getResourceUsageMetrics().getHeapUsage();
-        }
-      }
-      LOG.info("Original Job Maps Total Heap Usage: " + origJobMapsTHU);
-
-      for (int index = 0; index < zombieJob.getNumberReduces(); index ++) {
-        TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
-        if (reduceTask.getResourceUsageMetrics().getHeapUsage() > 0) {
-          origJobReducesTHU += 
-                  reduceTask.getResourceUsageMetrics().getHeapUsage();
-        }
-      }
-      LOG.info("Original Job Reduces Total Heap Usage: " + origJobReducesTHU);
-
-      simuJobMapsTHU = 
-          getCounterValue(jhInfo.getMapCounters(), 
-                          TaskCounter.COMMITTED_HEAP_BYTES.toString());
-      LOG.info("Simulated Job Maps Total Heap Usage: " + simuJobMapsTHU);
-
-      simuJobReducesTHU = 
-          getCounterValue(jhInfo.getReduceCounters(), 
-                          TaskCounter.COMMITTED_HEAP_BYTES.toString());
-      LOG.info("Simulated Jobs Reduces Total Heap Usage: " + simuJobReducesTHU);
-
-      long mapCount = jhInfo.getTotalMaps();
-      long reduceCount = jhInfo.getTotalReduces();
-
-      String strHeapRatio =
-          simuJobConf.get(GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO);
-      if (strHeapRatio == null) {
-        strHeapRatio = "0.3F";
-      }
-
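-      // Emulation accuracy is the simulated heap usage expressed as a
-      // percentage of the original; it must stay within the configured bounds.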
-      if (mapCount > 0) {
-        double mapEmulFactor = (simuJobMapsTHU * 100.0) / origJobMapsTHU;
-        long mapEmulAccuracy = Math.round(mapEmulFactor);
-        LOG.info("Maps memory emulation accuracy of a job:" 
-                + mapEmulAccuracy + "%");
-        Assert.assertTrue("Map phase total memory emulation had crossed the "
-                         + "configured max limit.", mapEmulAccuracy 
-                         <= GridMixConfig.GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT);
-        Assert.assertTrue("Map phase total memory emulation had not reached " 
-                         + "the configured min limit.", mapEmulAccuracy 
-                         >= GridMixConfig.GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT);
-        double expHeapRatio = Double.parseDouble(strHeapRatio);
-        LOG.info("expHeapRatio for maps:" + expHeapRatio);
-        double actHeapRatio = 
-                ((double)Math.abs(origJobMapsTHU - simuJobMapsTHU));
-        actHeapRatio /= origJobMapsTHU;
-        LOG.info("actHeapRatio for maps:" + actHeapRatio);
-        Assert.assertTrue("Simulated job maps heap ratio has not matched.",
-                          actHeapRatio <= expHeapRatio); 
-      }
-
-      if (reduceCount > 0) {
-        double reduceEmulFactor = (simuJobReducesTHU * 100.0) / origJobReducesTHU;
-        long reduceEmulAccuracy = Math.round(reduceEmulFactor);
-        LOG.info("Reduces memory emulation accuracy of a job:" 
-                + reduceEmulAccuracy + "%");
-        Assert.assertTrue("Reduce phase total memory emulation had crossed "
-                         + "the configured max limit.", reduceEmulAccuracy 
-                         <= GridMixConfig.GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT); 
-        Assert.assertTrue("Reduce phase total memory emulation had not " 
-                         + "reached the configured min limit.", reduceEmulAccuracy 
-                         >= GridMixConfig.GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT);
-        double expHeapRatio = Double.parseDouble(strHeapRatio);
-        LOG.info("expHeapRatio for reduces:" + expHeapRatio);
-        double actHeapRatio = 
-                ((double)Math.abs(origJobReducesTHU - simuJobReducesTHU));
-        actHeapRatio /= origJobReducesTHU;
-        LOG.info("actHeapRatio for reduces:" + actHeapRatio);
-        Assert.assertTrue("Simulated job reduces heap ratio has not matched.",
-                          actHeapRatio <= expHeapRatio); 
-      }
-    }
-  }
-
-  /**
-   * It verifies the cpu resource usage of a gridmix job against
-   * its original job.
-   * @param origJobHistory - Original job history.
-   * @param simuJobHistoryInfo - Simulated job history.
-   * @param simuJobConf - simulated job configuration.
-   */
-  public void verifyCPUEmulationOfJobs(ZombieJob origJobHistory,
-       JobHistoryParser.JobInfo simuJobHistoryInfo,
-       JobConf simuJobConf) throws Exception {
-
-    boolean isCpuEmulOn = false;
-    if (simuJobConf.get(GridMixConfig.GRIDMIX_CPU_EMULATON) != null) {
-      isCpuEmulOn = 
-          simuJobConf.get(GridMixConfig.GRIDMIX_CPU_EMULATON).
-              contains(GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN);
-    }
-
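-    // When cpu emulation is enabled, compare the cumulative cpu usage of the
-    // original job (from the trace) with the simulated job's CPU_MILLISECONDS
-    // counters for maps and reduces.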
-    if (isCpuEmulOn) {
-      Map<String,Long> origJobMetrics =
-                       getOriginalJobCPUMetrics(origJobHistory);
-      Map<String,Long> simuJobMetrics =
-                       getSimulatedJobCPUMetrics(simuJobHistoryInfo);
-
-      long origMapUsage = origJobMetrics.get("MAP");
-      LOG.info("Maps cpu usage of original job:" + origMapUsage);
-
-      long origReduceUsage = origJobMetrics.get("REDUCE");
-      LOG.info("Reduces cpu usage of original job:" + origReduceUsage);
-
-      long simuMapUsage = simuJobMetrics.get("MAP");
-      LOG.info("Maps cpu usage of simulated job:" + simuMapUsage);
-
-      long simuReduceUsage = simuJobMetrics.get("REDUCE");
-      LOG.info("Reduces cpu usage of simulated job:"+ simuReduceUsage);
-
-      long mapCount = simuJobHistoryInfo.getTotalMaps(); 
-      long reduceCount = simuJobHistoryInfo.getTotalReduces(); 
-
-      if (mapCount > 0) {
-        double mapEmulFactor = (simuMapUsage * 100.0) / origMapUsage;
-        long mapEmulAccuracy = Math.round(mapEmulFactor);
-        LOG.info("CPU emulation accuracy for maps in job " + 
-                 simuJobHistoryInfo.getJobId() + 
-                 ": " + mapEmulAccuracy + "%");
-        Assert.assertTrue("Map-side cpu emulation inaccurate!" +
-                          " Actual cpu usage: " + simuMapUsage +
-                          " Expected cpu usage: " + origMapUsage, mapEmulAccuracy
-                          >= GridMixConfig.GRIDMIX_CPU_EMULATION_LOWER_LIMIT
-                          && mapEmulAccuracy
-                          <= GridMixConfig.GRIDMIX_CPU_EMULATION_UPPER_LIMIT);
-      }
-
-      if (reduceCount > 0) {
-        double reduceEmulFactor = (simuReduceUsage * 100.0) / origReduceUsage;
-        long reduceEmulAccuracy = Math.round(reduceEmulFactor);
-        LOG.info("CPU emulation accuracy for reduces in job " + 
-                 simuJobHistoryInfo.getJobId() + 
-                 ": " + reduceEmulAccuracy + "%");
-        Assert.assertTrue("Reduce-side cpu emulation inaccurate!" +
-                          " Actual cpu usage: " + simuReduceUsage +
-                          " Expected cpu usage: " + origReduceUsage,  
-                          reduceEmulAccuracy
-                          >= GridMixConfig.GRIDMIX_CPU_EMULATION_LOWER_LIMIT
-                          && reduceEmulAccuracy
-                          <= GridMixConfig.GRIDMIX_CPU_EMULATION_UPPER_LIMIT);
-      }
-    }
-  }
-
-  /**
-   *  Get the simulated job cpu metrics.
-   * @param jhInfo - Simulated job history
-   * @return - cpu metrics as a map.
-   * @throws Exception - if an error occurs.
-   */
-  private Map<String,Long> getSimulatedJobCPUMetrics(
-          JobHistoryParser.JobInfo jhInfo) throws Exception {
-    Map<String, Long> resourceMetrics = new HashMap<String, Long>();
-    long mapCPUUsage = 
-        getCounterValue(jhInfo.getMapCounters(), 
-                        TaskCounter.CPU_MILLISECONDS.toString());
-    resourceMetrics.put("MAP", mapCPUUsage);
-    long reduceCPUUsage = 
-        getCounterValue(jhInfo.getReduceCounters(), 
-                        TaskCounter.CPU_MILLISECONDS.toString());
-    resourceMetrics.put("REDUCE", reduceCPUUsage);
-    return resourceMetrics;
-  }
-
-  /**
-   * Get the original job cpu metrics.
-   * @param zombieJob - original job history.
-   * @return - cpu metrics as map.
-   */
-  private Map<String, Long> getOriginalJobCPUMetrics(ZombieJob zombieJob) {
-    long mapTotalCPUUsage = 0;
-    long reduceTotalCPUUsage = 0;
-    Map<String,Long> resourceMetrics = new HashMap<String,Long>();
-
-    for (int index = 0; index < zombieJob.getNumberMaps(); index ++) {
-      TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
-      if (mapTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) {
-        mapTotalCPUUsage += 
-            mapTask.getResourceUsageMetrics().getCumulativeCpuUsage();
-      }
-    }
-    resourceMetrics.put("MAP", mapTotalCPUUsage); 
-    
-    for (int index = 0; index < zombieJob.getNumberReduces(); index ++) {
-      TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
-      if (reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) {
-        reduceTotalCPUUsage += 
-            reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage();
-      }
-    }
-    resourceMetrics.put("REDUCE", reduceTotalCPUUsage);
-    return resourceMetrics;
-  }
-  
-  /**
-   * Get the user resolver of a job.
-   */
-  public String getJobUserResolver() {
-    return userResolverVal;
-  }
-
-  /**
-   * It verifies the compression ratios of mapreduce jobs.
-   * @param origJobConf - original job configuration.
-   * @param simuJobConf - simulated job configuration.
-   * @param counters  - simulated job counters.
-   * @param origReduceCounters - original job reduce counters.
-   * @param origMapCounters - original job map counters.
-   * @throws ParseException - if a parser error occurs.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void verifyCompressionEmulation(JobConf origJobConf, 
-                                         JobConf simuJobConf,Counters counters, 
-                                         Map<String, Long> origReduceCounters, 
-                                         Map<String, Long> origMapJobCounters) 
-                                         throws ParseException,IOException { 
-    if (simuJobConf.getBoolean(compEmulKey, false)) {
-      String inputDir = origJobConf.get(fileInputFormatKey);
-      Assert.assertNotNull(fileInputFormatKey + " is Null",inputDir);
-      long simMapInputBytes = getCounterValue(counters, "HDFS_BYTES_READ");
-      long uncompressedInputSize = origMapJobCounters.get("MAP_INPUT_BYTES"); 
-      long simReduceInputBytes =
-            getCounterValue(counters, "REDUCE_SHUFFLE_BYTES");
-      long simMapOutputBytes = getCounterValue(counters, "MAP_OUTPUT_BYTES");
-
-      // Verify whether input compression is enabled or not.
-      if (inputDir.contains(".gz") || inputDir.contains(".tgz") 
-         || inputDir.contains(".bz")) { 
-        Assert.assertTrue("Input decompression attribute has not been set " 
-                         + "for compressed input.",
-                         simuJobConf.getBoolean(inputDecompKey, false));
-
-        float INPUT_COMP_RATIO = 
-            getExpectedCompressionRatio(simuJobConf, mapInputCompRatio);
-        float INTERMEDIATE_COMP_RATIO = 
-            getExpectedCompressionRatio(simuJobConf, mapOutputCompRatio);
-
-        // Verify Map Input Compression Ratio.
-        assertMapInputCompressionRatio(simMapInputBytes, uncompressedInputSize, 
-                                       INPUT_COMP_RATIO);
-
-        // Verify Map Output Compression Ratio.
-        assertMapOuputCompressionRatio(simReduceInputBytes, simMapOutputBytes, 
-                                       INTERMEDIATE_COMP_RATIO);
-      } else {
-        Assert.assertEquals("MAP input bytes has not matched.", 
-                            convertBytes(uncompressedInputSize), 
-                            convertBytes(simMapInputBytes));
-      }
-
-      Assert.assertEquals("Simulated job output format has not matched with " 
-                         + "original job output format.",
-                         origJobConf.getBoolean(fileOutputFormatKey,false), 
-                         simuJobConf.getBoolean(fileOutputFormatKey,false));
-
-      if (simuJobConf.getBoolean(fileOutputFormatKey,false)) { 
-        float OUTPUT_COMP_RATIO = 
-            getExpectedCompressionRatio(simuJobConf, reduceOutputCompRatio);
-
-         //Verify reduce output compression ratio.
-         long simReduceOutputBytes = 
-             getCounterValue(counters, "HDFS_BYTES_WRITTEN");
-         long origReduceOutputBytes = 
-             origReduceCounters.get("REDUCE_OUTPUT_BYTES");
-         assertReduceOutputCompressionRatio(simReduceOutputBytes, 
-                                            origReduceOutputBytes, 
-                                            OUTPUT_COMP_RATIO);
-      }
-    }
-  }
-
-  private void assertMapInputCompressionRatio(long simMapInputBytes, 
-                                   long origMapInputBytes, 
-                                   float expInputCompRatio) { 
-    LOG.info("***Verify the map input bytes compression ratio****");
-    LOG.info("Simulated job's map input bytes(REDUCE_SHUFFLE_BYTES): " 
-            + simMapInputBytes);
-    LOG.info("Original job's map input bytes: " + origMapInputBytes);
-
-    final float actInputCompRatio = 
-        getActualCompressionRatio(simMapInputBytes, origMapInputBytes);
-    LOG.info("Expected Map Input Compression Ratio:" + expInputCompRatio);
-    LOG.info("Actual Map Input Compression Ratio:" + actInputCompRatio);
-
-    float diffVal = (float)(expInputCompRatio * 0.06);
-    LOG.info("Expected Difference of Map Input Compression Ratio is <= " + 
-            + diffVal);
-    float delta = Math.abs(expInputCompRatio - actInputCompRatio);
-    LOG.info("Actual Difference of Map Iput Compression Ratio:" + delta);
-    Assert.assertTrue("Simulated job input compression ratio has mismatched.", 
-                      delta <= diffVal);
-    LOG.info("******Done******");
-  }
-
-  private void assertMapOuputCompressionRatio(long simReduceInputBytes, 
-                                              long simMapoutputBytes, 
-                                              float expMapOuputCompRatio) { 
-    LOG.info("***Verify the map output bytes compression ratio***");
-    LOG.info("Simulated job reduce input bytes:" + simReduceInputBytes);
-    LOG.info("Simulated job map output bytes:" + simMapoutputBytes);
-
-    final float actMapOutputCompRatio = 
-        getActualCompressionRatio(simReduceInputBytes, simMapoutputBytes);
-    LOG.info("Expected Map Output Compression Ratio:" + expMapOuputCompRatio);
-    LOG.info("Actual Map Output Compression Ratio:" + actMapOutputCompRatio);
-
-    float diffVal = 0.05f;
-    LOG.info("Expected Difference Of Map Output Compression Ratio is <= " 
-            + diffVal);
-    float delta = Math.abs(expMapOuputCompRatio - actMapOutputCompRatio);
-    LOG.info("Actual Difference Of Map Ouput Compression Ratio :" + delta);
-
-    Assert.assertTrue("Simulated job map output compression ratio " 
-                     + "has not been matched.", delta <= diffVal);
-    LOG.info("******Done******");
-  }
-
-  private void assertReduceOutputCompressionRatio(long simReduceOutputBytes, 
-      long origReduceOutputBytes , float expOutputCompRatio ) {
-      LOG.info("***Verify the reduce output bytes compression ratio***");
-      final float actOutputCompRatio = 
-          getActualCompressionRatio(simReduceOutputBytes, origReduceOutputBytes);
-      LOG.info("Simulated job's reduce output bytes:" + simReduceOutputBytes);
-      LOG.info("Original job's reduce output bytes:" + origReduceOutputBytes);
-      LOG.info("Expected output compression ratio:" + expOutputCompRatio);
-      LOG.info("Actual output compression ratio:" + actOutputCompRatio);
-      long diffVal = (long)(origReduceOutputBytes * 0.15);
-      long delta = Math.abs(origReduceOutputBytes - simReduceOutputBytes);
-      LOG.info("Expected difference of output compressed bytes is <= " 
-              + diffVal);
-      LOG.info("Actual difference of compressed ouput bytes:" + delta);
-      Assert.assertTrue("Simulated job reduce output compression ratio " +
-         "has not been matched.", delta <= diffVal);
-      LOG.info("******Done******");
-  }
-
-  private float getExpectedCompressionRatio(JobConf simuJobConf, 
-                                            String RATIO_TYPE) {
-    // Default decompression ratio is 0.50f irrespective of the original 
-    // job compression ratio.
-    if (simuJobConf.get(RATIO_TYPE) != null) {
-      return Float.parseFloat(simuJobConf.get(RATIO_TYPE));
-    } else {
-      return 0.50f;
-    }
-  }
-
-  private float getActualCompressionRatio(long compressBytes, 
-                                          long uncompressBytes) {
-    double ratio = ((double)compressBytes) / uncompressBytes; 
-    int significant = (int)Math.round(ratio * 100);
-    return ((float)significant)/100; 
-  }
-
-  /**
-   * Verify the distributed cache files between the jobs in a gridmix run.
-   * @param jobsInfo - jobConfs of simulated and original jobs as a map.
-   */
-  public void verifyDistributedCacheBetweenJobs(
-      Map<String,List<JobConf>> jobsInfo) {
-     if (jobsInfo.size() > 1) {
-       Map<String, Integer> simJobfilesOccurBtnJobs = 
-           getDistcacheFilesOccurenceBetweenJobs(jobsInfo, 0);
-       Map<String, Integer> origJobfilesOccurBtnJobs = 
-           getDistcacheFilesOccurenceBetweenJobs(jobsInfo, 1);
-       List<Integer> simuOccurList = 
-           getMapValuesAsList(simJobfilesOccurBtnJobs);
-       Collections.sort(simuOccurList);
-       List<Integer> origOccurList = 
-           getMapValuesAsList(origJobfilesOccurBtnJobs);
-       Collections.sort(origOccurList);
-       Assert.assertEquals("The unique count of distibuted cache files in " 
-                        + "simulated jobs have not matched with the unique "
-                        + "count of original jobs distributed files ", 
-                        simuOccurList.size(), origOccurList.size());
-       int index = 0;
-       for (Integer origDistFileCount : origOccurList) {
-         Assert.assertEquals("Distributed cache file reused in simulated " 
-                            + "jobs has not matched with reused of distributed"
-                            + "cache file in original jobs.",
-                            origDistFileCount, simuOccurList.get(index));
-         index ++;
-       }
-     }
-  }
-
-  /**
-   * Get the unique distributed cache files and occurrence between the jobs.
-   * @param jobsInfo - job's configurations as a map.
-   * @param jobConfIndex - 0 for simulated job configuration and 
-   *                       1 for original jobs configuration.
-   * @return  - unique distributed cache files and occurrences as map.
-   */
-  private Map<String, Integer> getDistcacheFilesOccurenceBetweenJobs(
-      Map<String, List<JobConf>> jobsInfo, int jobConfIndex) {
-    Map<String,Integer> filesOccurBtnJobs = new HashMap <String,Integer>();
-    Set<String> jobIds = jobsInfo.keySet();
-    Iterator<String > ite = jobIds.iterator();
-    while (ite.hasNext()) {
-      String jobId = ite.next();
-      List<JobConf> jobconfs = jobsInfo.get(jobId);
-      String [] distCacheFiles = jobconfs.get(jobConfIndex).get(
-          GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(",");
-      String [] distCacheFileTimeStamps = jobconfs.get(jobConfIndex).get(
-          GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP).split(",");
-      String [] distCacheFileVisib = jobconfs.get(jobConfIndex).get(
-          GridMixConfig.GRIDMIX_DISTCACHE_VISIBILITIES).split(",");
-      int indx = 0;
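-      // A distributed cache file is identified by its path, time stamp and
-      // owning user; count how often each such file occurs across the jobs.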
-      for (String distCacheFile : distCacheFiles) {
-        String fileAndSize = distCacheFile + "^" 
-                           + distCacheFileTimeStamps[indx] + "^" 
-                           + jobconfs.get(jobConfIndex).getUser();
-        if (filesOccurBtnJobs.get(fileAndSize) != null) {
-          int count = filesOccurBtnJobs.get(fileAndSize);
-          count ++;
-          filesOccurBtnJobs.put(fileAndSize, count);
-        } else {
-          filesOccurBtnJobs.put(fileAndSize, 1);
-        }
-        indx ++;
-      }
-    }
-    return filesOccurBtnJobs;
-  }
-
-  /**
-   * It verifies the distributed cache emulation of a job.
-   * @param zombieJob - Original job story.
-   * @param simuJobConf - Simulated job configuration.
-   */
-  public void verifyDistributeCache(ZombieJob zombieJob, 
-                                    JobConf simuJobConf) throws IOException {
-    if (simuJobConf.getBoolean(GridMixConfig.GRIDMIX_DISTCACHE_ENABLE, false)) {
-      JobConf origJobConf = zombieJob.getJobConf();
-      assertFileVisibility(simuJobConf);
-      assertDistcacheFiles(simuJobConf,origJobConf);
-      assertFileSizes(simuJobConf,origJobConf);
-      assertFileStamps(simuJobConf,origJobConf);
-    } else {
-      Assert.assertNull("Configuration has distributed cache visibilites" 
-          + "without enabled distributed cache emulation.", 
-          simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_VISIBILITIES));
-      Assert.assertNull("Configuration has distributed cache files time " 
-          + "stamps without enabled distributed cache emulation.", 
-          simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP));
-      Assert.assertNull("Configuration has distributed cache files paths" 
-          + "without enabled distributed cache emulation.", 
-          simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILES));
-      Assert.assertNull("Configuration has distributed cache files sizes" 
-          + "without enabled distributed cache emulation.", 
-          simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILESSIZE));
-    }
-  }
-
-  private void assertFileStamps(JobConf simuJobConf, JobConf origJobConf) {
-    //Verify simulated jobs against distributed cache files time stamps.
-    String [] origDCFTS = 
-        origJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP).split(",");
-    String [] simuDCFTS = 
-        simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP).split(",");
-    for (int index = 0; index < origDCFTS.length; index++) { 
-      Assert.assertTrue("Invalid time stamps between original "
-          +"and simulated job", Long.parseLong(origDCFTS[index]) 
-          < Long.parseLong(simuDCFTS[index]));
-    }
-  }
-
-  private void assertFileVisibility(JobConf simuJobConf ) {
-    // Verify simulated jobs against distributed cache files visibilities.
-    String [] distFiles = 
-        simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(",");
-    String [] simuDistVisibilities = 
-        simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_VISIBILITIES).split(",");
-    List<Boolean> expFileVisibility = new ArrayList<Boolean >();
-    int index = 0;
-    for (String distFile : distFiles) {
-      boolean isLocalDistCache = GridmixSystemTestCase.isLocalDistCache(
-                                 distFile, 
-                                 simuJobConf.getUser(), 
-                                 Boolean.valueOf(simuDistVisibilities[index]));
-      if (!isLocalDistCache) {
-        expFileVisibility.add(true);
-      } else {
-        expFileVisibility.add(false);
-      }
-      index ++;
-    }
-    index = 0;
-    for (String actFileVisibility :  simuDistVisibilities) {
-      Assert.assertEquals("Simulated job distributed cache file " 
-                         + "visibilities has not matched.", 
-                         expFileVisibility.get(index),
-                         Boolean.valueOf(actFileVisibility));
-      index ++;
-    }
-  }
-  
-  private void assertDistcacheFiles(JobConf simuJobConf, JobConf origJobConf) 
-      throws IOException {
-    //Verify simulated jobs against distributed cache files.
-    String [] origDistFiles = origJobConf.get(
-        GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(",");
-    String [] simuDistFiles = simuJobConf.get(
-        GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(",");
-    String [] simuDistVisibilities = simuJobConf.get(
-        GridMixConfig.GRIDMIX_DISTCACHE_VISIBILITIES).split(",");
-    Assert.assertEquals("No. of simulatued job's distcache files mismacted" 
-                       + "with no.of original job's distcache files", 
-                       origDistFiles.length, simuDistFiles.length);
-
-    int index = 0;
-    for (String simDistFile : simuDistFiles) {
-      Path distPath = new Path(simDistFile);
-      boolean isLocalDistCache = 
-          GridmixSystemTestCase.isLocalDistCache(simDistFile,
-              simuJobConf.getUser(),
-              Boolean.valueOf(simuDistVisibilities[index]));
-      if (!isLocalDistCache) {
-        FileSystem fs = distPath.getFileSystem(conf);
-        FileStatus fstat = fs.getFileStatus(distPath);
-        FsPermission permission = fstat.getPermission();
-        Assert.assertTrue("HDFS distributed cache file has wrong " 
-                         + "permissions for users.", 
-                         FsAction.READ_WRITE.SYMBOL 
-                         == permission.getUserAction().SYMBOL);
-        Assert.assertTrue("HDFS distributed cache file has wrong " 
-                         + "permissions for groups.", 
-                         FsAction.READ.SYMBOL 
-                         == permission.getGroupAction().SYMBOL);
-        Assert.assertTrue("HDSFS distributed cache file has wrong " 
-                         + "permissions for others.", 
-                         FsAction.READ.SYMBOL 
-                         == permission.getOtherAction().SYMBOL);
-      }
-      index++;
-    }
-  }
-
-  private void assertFileSizes(JobConf simuJobConf, JobConf origJobConf) { 
-    // Verify simulated jobs against distributed cache files size.
-    List<String> origDistFilesSize = 
-        Arrays.asList(origJobConf.get(
-            GridMixConfig.GRIDMIX_DISTCACHE_FILESSIZE).split(","));
-    Collections.sort(origDistFilesSize);
-
-    List<String> simuDistFilesSize = 
-        Arrays.asList(simuJobConf.get(
-            GridMixConfig.GRIDMIX_DISTCACHE_FILESSIZE).split(","));
-    Collections.sort(simuDistFilesSize);
-
-    Assert.assertEquals("Simulated job's file size list has not " 
-                       + "matched with the Original job's file size list.",
-                       origDistFilesSize.size(),
-                       simuDistFilesSize.size());
-
-    for (int index = 0; index < origDistFilesSize.size(); index ++) {
-       Assert.assertEquals("Simulated job distcache file size has not " 
-                          + "matched with original job distcache file size.", 
-                          origDistFilesSize.get(index), 
-                          simuDistFilesSize.get(index));
-    }
-  }
-
-  private void setJobDistributedCacheInfo(String jobId, JobConf simuJobConf, 
-     JobConf origJobConf) { 
-    if (simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILES) != null) {
-      List<JobConf> jobConfs = new ArrayList<JobConf>();
-      jobConfs.add(simuJobConf);
-      jobConfs.add(origJobConf);
-      simuAndOrigJobsInfo.put(jobId,jobConfs);
-    }
-  }
-
-  private List<Integer> getMapValuesAsList(Map<String,Integer> jobOccurs) { 
-    List<Integer> occursList = new ArrayList<Integer>();
-    Set<String> files = jobOccurs.keySet();
-    Iterator<String > ite = files.iterator();
-    while (ite.hasNext()) {
-      String file = ite.next(); 
-      occursList.add(jobOccurs.get(file));
-    }
-    return occursList;
-  }
-
-  /**
-   * It verifies the high ram gridmix jobs.
-   * @param zombieJob - Original job story.
-   * @param simuJobConf - Simulated job configuration.
-   */
-  @SuppressWarnings("deprecation")
-  public void verifyHighRamMemoryJobs(ZombieJob zombieJob,
-                                      JobConf simuJobConf) {
-    JobConf origJobConf = zombieJob.getJobConf();
-    int origMapFactor = getMapFactor(origJobConf);
-    int origReduceFactor = getReduceFactor(origJobConf);
-    boolean isHighRamEnable = 
-        simuJobConf.getBoolean(GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE, 
-                               false);
-    if (isHighRamEnable) {
-        if (origMapFactor >= 2 && origReduceFactor >= 2) {
-          assertGridMixHighRamJob(simuJobConf, origJobConf, 1);
-        } else if(origMapFactor >= 2) {
-          assertGridMixHighRamJob(simuJobConf, origJobConf, 2);
-        } else if(origReduceFactor >= 2) {
-          assertGridMixHighRamJob(simuJobConf, origJobConf, 3);
-        }
-    } else {
-        if (origMapFactor >= 2 && origReduceFactor >= 2) {
-              assertGridMixHighRamJob(simuJobConf, origJobConf, 4);
-        } else if(origMapFactor >= 2) {
-              assertGridMixHighRamJob(simuJobConf, origJobConf, 5);
-        } else if(origReduceFactor >= 2) {
-              assertGridMixHighRamJob(simuJobConf, origJobConf, 6);
-        }
-    }
-  }
-
-  /**
-   * Get the value for identifying the slots used by the map.
-   * @param jobConf - job configuration
-   * @return - map factor value.
-   */
-  public static int getMapFactor(Configuration jobConf) {
-    long clusterMapMem = 
-        Long.parseLong(jobConf.get(GridMixConfig.CLUSTER_MAP_MEMORY));
-    long jobMapMem = 
-        Long.parseLong(jobConf.get(GridMixConfig.JOB_MAP_MEMORY_MB));
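-    // Factor = number of slots a single map task occupies, i.e. the per-task
-    // memory requirement divided by the cluster's per-slot memory.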
-    return (int)Math.ceil((double)jobMapMem / clusterMapMem);  
-  }
-
-  /**
-   * Get the value for identifying the slots used by the reduce.
-   * @param jobConf - job configuration.
-   * @return - reduce factor value.
-   */
-  public static int getReduceFactor(Configuration jobConf) {
-    long clusterReduceMem = 
-        Long.parseLong(jobConf.get(GridMixConfig.CLUSTER_REDUCE_MEMORY));
-    long jobReduceMem = 
-        Long.parseLong(jobConf.get(GridMixConfig.JOB_REDUCE_MEMORY_MB));
-    return (int)Math.ceil((double)jobReduceMem / clusterReduceMem);
-  }
-
-  @SuppressWarnings("deprecation")
-  private void assertGridMixHighRamJob(JobConf simuJobConf, 
-                                       Configuration origConf, int option) {
-    int simuMapFactor = getMapFactor(simuJobConf);
-    int simuReduceFactor = getReduceFactor(simuJobConf);
-    /**
-     *  option 1 : Both map and reduce honor the high ram.
-     *  option 2 : Map only honors the high ram.
-     *  option 3 : Reduce only honors the high ram.
-     *  option 4 : Both map and reduce should not honor the high ram
-     *             when emulation is disabled.
-     *  option 5 : Map should not honor the high ram when emulation is disabled.
-     *  option 6 : Reduce should not honor the high ram when emulation is disabled.
-     */
-    switch (option) {
-      case 1 :
-               Assert.assertTrue("Gridmix job has not honored the high "
-                                + "ram for map.", simuMapFactor >= 2 
-                                && simuMapFactor == getMapFactor(origConf));
-               Assert.assertTrue("Gridmix job has not honored the high "
-                                + "ram for reduce.", simuReduceFactor >= 2 
-                                && simuReduceFactor 
-                                == getReduceFactor(origConf));
-               break;
-      case 2 :
-               Assert.assertTrue("Gridmix job has not honored the high "
-                                + "ram for map.", simuMapFactor >= 2 
-                                && simuMapFactor == getMapFactor(origConf));
-               break;
-      case 3 :
-               Assert.assertTrue("Girdmix job has not honored the high "
-                                + "ram for reduce.", simuReduceFactor >= 2 
-                                && simuReduceFactor 
-                                == getReduceFactor(origConf));
-               break;
-      case 4 :
-               Assert.assertTrue("Gridmix job has honored the high "
-                                + "ram for map in emulation disable state.", 
-                                simuMapFactor < 2 
-                                && simuMapFactor != getMapFactor(origConf));
-               Assert.assertTrue("Gridmix job has honored the high "
-                                + "ram for reduce in emulation disable state.", 
-                                simuReduceFactor < 2 
-                                && simuReduceFactor 
-                                != getReduceFactor(origConf));
-               break;
-      case 5 :
-               Assert.assertTrue("Gridmix job has honored the high "
-                                + "ram for map in emulation disable state.", 
-                                simuMapFactor < 2 
-                                && simuMapFactor != getMapFactor(origConf));
-               break;
-      case 6 :
-               Assert.assertTrue("Girdmix job has honored the high "
-                                + "ram for reduce in emulation disable state.", 
-                                simuReduceFactor < 2 
-                                && simuReduceFactor 
-                                != getReduceFactor(origConf));
-               break;
-    }
-  }
-
-  /**
-   * Get task memory after scaling based on cluster configuration.
-   * @param jobTaskKey - Job task key attribute.
-   * @param clusterTaskKey - Cluster task key attribute.
-   * @param origConf - Original job configuration.
-   * @param simuConf - Simulated job configuration.
-   * @return scaled task memory value.
-   */
-  @SuppressWarnings("deprecation")
-  public static long getScaledTaskMemInMB(String jobTaskKey, 
-                                          String clusterTaskKey, 
-                                          Configuration origConf, 
-                                          Configuration simuConf) { 
-    long simuClusterTaskValue = 
-        simuConf.getLong(clusterTaskKey, JobConf.DISABLED_MEMORY_LIMIT);
-    long origClusterTaskValue = 
-        origConf.getLong(clusterTaskKey, JobConf.DISABLED_MEMORY_LIMIT);
-    long origJobTaskValue = 
-        origConf.getLong(jobTaskKey, JobConf.DISABLED_MEMORY_LIMIT);
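-    // Scale factor = slots the original job's task needed on its cluster;
-    // apply it to the simulated cluster's per-slot memory.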
-    double scaleFactor = 
-        Math.ceil((double)origJobTaskValue / origClusterTaskValue);
-    long simulatedJobValue = (long)(scaleFactor * simuClusterTaskValue);
-    return simulatedJobValue;
-  }
-
-  /**
-   * It verifies the memory limit of a task.
-   * @param TaskMemInMB - task memory limit.
-   * @param taskLimitInMB - task upper limit.
-   */
-  public static void verifyMemoryLimits(long TaskMemInMB, long taskLimitInMB) {
-    if (TaskMemInMB > taskLimitInMB) {
-      Assert.fail("Simulated job's task memory exceeds the " 
-                 + "upper limit of task virtual memory.");
-    }
-  }
-
-  private String convertJobStatus(String jobStatus) {
-    if (jobStatus.equals("SUCCEEDED")) { 
-      return "SUCCESS";
-    } else {
-      return jobStatus;
-    }
-  }
-  
-  private String convertBytes(long bytesValue) {
-    int units = 1024;
-    if (bytesValue < units) {
-      return String.valueOf(bytesValue) + "B";
-    } else {
-      // it converts the bytes into either KB or MB or GB or TB etc.
-      int exp = (int)(Math.log(bytesValue) / Math.log(units));
-      return String.format("%1d%sB", (long)(bytesValue / Math.pow(units, exp)), 
-          "KMGTPE".charAt(exp - 1));
-    }
-  }
- 
-
-  private long getCounterValue(Counters counters, String key) 
-     throws ParseException { 
-    for (String groupName : counters.getGroupNames()) {
-       CounterGroup totalGroup = counters.getGroup(groupName);
-       Iterator<Counter> itrCounter = totalGroup.iterator();
-       while (itrCounter.hasNext()) {
-         Counter counter = itrCounter.next();
-         if (counter.getName().equals(key)) {
-           return counter.getValue();
-         }
-       }
-    }
-    return 0;
-  }
-}
-
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java
deleted file mode 100644
index 723adbc..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java
+++ /dev/null
@@ -1,513 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-
-import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.mapred.gridmix.Gridmix;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapreduce.JobID;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Arrays;
-import java.net.URI;
-import java.text.SimpleDateFormat;
-import java.io.OutputStream;
-import java.util.Set;
-import java.util.List;
-import java.util.Iterator;
-import java.util.Map;
-import java.io.File;
-import java.io.FileOutputStream;
-import org.apache.hadoop.test.system.ProxyUserDefinitions;
-import org.apache.hadoop.test.system.ProxyUserDefinitions.GroupsAndHost;
-
-/**
- * Gridmix utilities.
- */
-public class UtilsForGridmix {
-  private static final Log LOG = LogFactory.getLog(UtilsForGridmix.class);
-  private static final Path DEFAULT_TRACES_PATH =
-    new Path(System.getProperty("user.dir") + "/src/test/system/resources/");
-
-  /**
-   * cleanup the folder or file.
-   * @param path - folder or file path.
-   * @param conf - cluster configuration 
-   * @throws IOException - If an I/O error occurs.
-   */
-  public static void cleanup(Path path, Configuration conf) 
-     throws IOException {
-    FileSystem fs = path.getFileSystem(conf);
-    fs.delete(path, true);
-    fs.close();
-  }
-
-  /**
-   * Get the login user.
-   * @return - login user as a string.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public static String getUserName() throws IOException {
-    return UserGroupInformation.getLoginUser().getUserName();
-  }
-  
-  /**
-   * Get the argument list for gridmix job.
-   * @param gridmixDir - gridmix parent directory.
-   * @param gridmixRunMode - gridmix modes either 1,2,3.
-   * @param values - gridmix runtime values.
-   * @param otherArgs - gridmix other generic args.
-   * @return - argument list as string array.
-   */
-  public static String [] getArgsList(Path gridmixDir, int gridmixRunMode, 
-                                      String [] values, String [] otherArgs) {
-    String [] runtimeArgs = { 
-        "-D", GridMixConfig.GRIDMIX_LOG_MODE + "=DEBUG", 
-        "-D", GridMixConfig.GRIDMIX_OUTPUT_DIR + "=gridmix", 
-        "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true", 
-        "-D", GridMixConfig.GRIDMIX_JOB_TYPE + "=" + values[0], 
-        "-D", GridMixConfig.GRIDMIX_USER_RESOLVER + "=" + values[1], 
-        "-D", GridMixConfig.GRIDMIX_SUBMISSION_POLICY + "=" + values[2]
-    };
-
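-    // Choose the positional arguments based on the run mode and whether the
-    // RoundRobinUserResolver requires a users file.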
-    String [] classArgs;
-    if ((gridmixRunMode == GridMixRunMode.DATA_GENERATION.getValue() 
-       || gridmixRunMode 
-       == GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()) 
-       && values[1].indexOf("RoundRobinUserResolver") > 0) { 
-      classArgs = new String[] { 
-          "-generate", values[3], 
-          "-users", values[4], 
-          gridmixDir.toString(), 
-          values[5]
-      };
-    } else if (gridmixRunMode == GridMixRunMode.DATA_GENERATION.getValue() 
-              || gridmixRunMode 
-              == GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()) { 
-      classArgs = new String[] { 
-          "-generate", values[3], 
-          gridmixDir.toString(), 
-          values[4]
-      };
-    } else if (gridmixRunMode == GridMixRunMode.RUN_GRIDMIX.getValue() 
-              && values[1].indexOf("RoundRobinUserResolver") > 0) { 
-      classArgs = new String[] { 
-          "-users", values[3], 
-          gridmixDir.toString(), 
-          values[4]
-      };
-    } else { 
-      classArgs = new String[] { 
-         gridmixDir.toString(),values[3]
-      };
-    }
-
-    String [] args = new String [runtimeArgs.length + 
-       classArgs.length + ((otherArgs != null)?otherArgs.length:0)];
-    System.arraycopy(runtimeArgs, 0, args, 0, runtimeArgs.length);
-
-    if (otherArgs != null) {
-      System.arraycopy(otherArgs, 0, args, runtimeArgs.length, 
-                       otherArgs.length);
-      System.arraycopy(classArgs, 0, args, (runtimeArgs.length + 
-                       otherArgs.length), classArgs.length);
-    } else {
-      System.arraycopy(classArgs, 0, args, runtimeArgs.length, 
-                       classArgs.length);
-    }
-    return args;
-  }
-  
-  /**
-   * Create a file with specified size in mb.
-   * @param sizeInMB - file size in mb.
-   * @param inputDir - input directory.
-   * @param conf - cluster configuration.
-   * @throws Exception - if an exception occurs.
-   */
-  public static void createFile(int sizeInMB, Path inputDir, 
-      Configuration conf) throws Exception {
-    Date d = new Date();
-    SimpleDateFormat sdf = new SimpleDateFormat("ddMMyy_HHmmssS");
-    String formatDate = sdf.format(d);
-    FileSystem fs = inputDir.getFileSystem(conf);
-    OutputStream out = fs.create(new Path(inputDir,"datafile_" + formatDate));
-    final byte[] b = new byte[1024 * 1024];
-    for (int index = 0; index < sizeInMB; index++) { 
-      out.write(b);
-    }    
-    out.close();
-    fs.close();
-  }
-  
-  /**
-   * Create directories for a path.
-   * @param path - directories path.
-   * @param conf  - cluster configuration.
-   * @throws IOException  - if an I/O error occurs.
-   */
-  public static void createDirs(Path path,Configuration conf) 
-     throws IOException { 
-    FileSystem fs = path.getFileSystem(conf);
-    if (!fs.exists(path)) { 
-       fs.mkdirs(path);
-    }
-  }
-  
-  /**
-   * Run the Gridmix job with given runtime arguments.
-   * @param gridmixDir - Gridmix parent directory.
-   * @param conf - cluster configuration.
-   * @param gridmixRunMode - gridmix run mode either 1,2,3
-   * @param runtimeValues -gridmix runtime values.
-   * @return - gridmix status either 0 or 1.
-   * @throws Exception
-   */
-  public static int runGridmixJob(Path gridmixDir, Configuration conf, 
-     int gridmixRunMode, String [] runtimeValues) throws Exception {
-    return runGridmixJob(gridmixDir, conf, gridmixRunMode, runtimeValues, null);
-  }
-  /**
-   * Run the Gridmix job with given runtime arguments.
-   * @param gridmixDir - Gridmix parent directory
-   * @param conf - cluster configuration.
-   * @param gridmixRunMode - gridmix run mode.
-   * @param runtimeValues - gridmix runtime values.
-   * @param otherArgs - gridmix other generic args.
-   * @return - gridmix status either 0 or 1.
-   * @throws Exception
-   */
-  
-  public static int runGridmixJob(Path gridmixDir, Configuration conf, 
-                                  int gridmixRunMode, String [] runtimeValues, 
-                                  String [] otherArgs) throws Exception {
-    Path  outputDir = new Path(gridmixDir, "gridmix");
-    Path inputDir = new Path(gridmixDir, "input");
-    LOG.info("Cleanup the data if data already exists.");
-    String modeName = new String();
-    switch (gridmixRunMode) { 
-      case 1 : 
-        cleanup(inputDir, conf);
-        cleanup(outputDir, conf);
-        modeName = GridMixRunMode.DATA_GENERATION.name();
-        break;
-      case 2 : 
-        cleanup(outputDir, conf);
-        modeName = GridMixRunMode.RUN_GRIDMIX.name();
-        break;
-      case 3 : 
-        cleanup(inputDir, conf);
-        cleanup(outputDir, conf);
-        modeName = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.name();
-        break;
-    }
-
-    final String [] args = 
-        UtilsForGridmix.getArgsList(gridmixDir, gridmixRunMode, 
-                                    runtimeValues, otherArgs);
-    Gridmix gridmix = new Gridmix();
-    LOG.info("Submit a Gridmix job in " + runtimeValues[1] 
-            + " mode for " + modeName);
-    int exitCode = ToolRunner.run(conf, gridmix, args);
-    return exitCode;
-  }
-
-  /**
-   * Get the proxy users file.
-   * @param conf - cluster configuration.
-   * @return String - proxy users file.
-   * @throws Exception - if no proxy users are found in the configuration.
-   */
-  public static String getProxyUsersFile(Configuration conf) 
-      throws Exception {
-     ProxyUserDefinitions pud = getProxyUsersData(conf);
-     String fileName = buildProxyUsersFile(pud.getProxyUsers());
-     if (fileName == null) { 
-        LOG.error("Proxy users file not found.");
-        throw new Exception("Proxy users file not found.");
-     } else { 
-        return fileName;
-     }
-  }
-  
-  /**
-  * List the current gridmix job ids.
-  * @param client - job client.
-  * @param execJobCount - number of executed jobs.
-  * @return - list of gridmix job ids.
-  */
- public static List<JobID> listGridmixJobIDs(JobClient client, 
-     int execJobCount) throws IOException { 
-   List<JobID> jobids = new ArrayList<JobID>();
-   JobStatus [] jobStatus = client.getAllJobs();
-   int numJobs = jobStatus.length;
-   for (int index = 1; index <= execJobCount; index++) {
-     JobStatus js = jobStatus[numJobs - index];
-     JobID jobid = js.getJobID();
-     String jobName = js.getJobName();
-     if (!jobName.equals("GRIDMIX_GENERATE_INPUT_DATA") && 
-         !jobName.equals("GRIDMIX_GENERATE_DISTCACHE_DATA")) {
-       jobids.add(jobid);
-     }
-   }
-   return (jobids.size() == 0)? null : jobids;
- }
-
- /**
-  * List the proxy users. 
-  * @param conf
-  * @return
-  * @throws Exception
-  */
- public static List<String> listProxyUsers(Configuration conf,
-     String loginUser) throws Exception {
-   List<String> proxyUsers = new ArrayList<String>();
-   ProxyUserDefinitions pud = getProxyUsersData(conf);
-   Map<String, GroupsAndHost> usersData = pud.getProxyUsers();
-   Collection users = usersData.keySet();
-   Iterator<String> itr = users.iterator();
-   while (itr.hasNext()) { 
-     String user = itr.next();
-     if (!user.equals(loginUser)) { proxyUsers.add(user); }
-   }
-   return proxyUsers;
- }
-
-  private static String buildProxyUsersFile(final Map<String, GroupsAndHost> 
-      proxyUserData) throws Exception { 
-     FileOutputStream fos = null;
-     File file = null;
-     StringBuffer input = new StringBuffer();
-     Set users = proxyUserData.keySet();
-     Iterator itr = users.iterator();
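-     // Each line of the generated proxy users file has the form:
-     // <user>,<group1>,<group2>,...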
-     while (itr.hasNext()) { 
-       String user = itr.next().toString();
-       if (!user.equals(
-           UserGroupInformation.getLoginUser().getShortUserName())) {
-         input.append(user);
-         final GroupsAndHost gah = proxyUserData.get(user);
-         final List <String> groups = gah.getGroups();
-         for (String group : groups) { 
-           input.append(",");
-           input.append(group);
-         }
-         input.append("\n");
-       }
-     }
-     if (input.length() > 0) { 
-        try {
-           file = File.createTempFile("proxyusers", null);
-           fos = new FileOutputStream(file);
-           fos.write(input.toString().getBytes());
-        } catch(IOException ioexp) { 
-           LOG.warn(ioexp.getMessage());
-           return null;
-        } finally {
-           if (fos != null) {
-             fos.close();
-           }
-           if (file != null) {
-             file.deleteOnExit();
-           }
-        }
-        LOG.info("file.toString():" + file.toString());
-        return file.toString();
-     } else {
-        return null;
-     }
-  }
-
-  private static ProxyUserDefinitions getProxyUsersData(Configuration conf)
-      throws Exception { 
-    Iterator itr = conf.iterator();
-    List<String> proxyUsersData = new ArrayList<String>();
-    while (itr.hasNext()) { 
-      String property = itr.next().toString();
-      if (property.indexOf("hadoop.proxyuser") >= 0 
-         && property.indexOf("groups=") >= 0) { 
-        proxyUsersData.add(property.split("\\.")[2]);
-      }
-    }
-
-    if (proxyUsersData.size() == 0) { 
-       LOG.error("No proxy users found in the configuration.");
-       throw new Exception("No proxy users found in the configuration.");
-    }
-
-    ProxyUserDefinitions pud = new ProxyUserDefinitions() { 
-       public boolean writeToFile(URI filePath) throws IOException { 
-           throw new UnsupportedOperationException("No such method exists.");
-       };
-    };
-
-     for (String userName : proxyUsersData) { 
-        List<String> groups = Arrays.asList(conf.get("hadoop.proxyuser." + 
-            userName + ".groups").split("//,"));
-        List<String> hosts = Arrays.asList(conf.get("hadoop.proxyuser." + 
-            userName + ".hosts").split("//,"));
-        ProxyUserDefinitions.GroupsAndHost definitions = 
-            pud.new GroupsAndHost();
-        definitions.setGroups(groups);
-        definitions.setHosts(hosts);
-        pud.addProxyUser(userName, definitions);
-     }
-     return pud;
-  }
-
-  /**
-   *  Gives the list of paths for MR traces against different time 
-   *  intervals. It fetches only the paths which follow the below 
-   *  file naming convention.
-   *    Syntax : &lt;FileName&gt;_&lt;TimeInterval&gt;.json.gz
-   *  The time interval in the file name has to follow the below 
-   *  convention.
-   *    Syntax: &lt;numeric&gt;[m|h|d] 
-   *    e.g : a 10 minute trace should specify 10m, 
-   *    a 1 hour trace should specify 1h, 
-   *    and a 1 day trace should specify 1d.
-   *
-   * @param conf - cluster configuration.
-   * @return - list of MR paths as key/value pair based on time interval.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public static Map<String, String> getMRTraces(Configuration conf) 
-     throws IOException { 
-    return getMRTraces(conf, DEFAULT_TRACES_PATH);
-  }
-  
-  /**
-   *  It gives the list of paths for MR traces against different time 
-   *  intervals. It fetches only the paths which follow the below 
-   *  file naming convention.
-   *    Syntax : &lt;FileNames&gt;_&lt;TimeInterval&gt;.json.gz
-   *  The time interval in the file name has to follow the below 
-   *  convention. 
-   *    Syntax: &lt;numeric&gt;[m|h|d] 
-   *    e.g : a 10 minute trace should specify 10m,
-   *    a 1 hour trace should specify 1h, 
-   *    and a 1 day trace should specify 1d.
-   *
-   * @param conf - cluster configuration object.
-   * @param tracesPath - MR traces path.
-   * @return - list of MR paths as key/value pair based on time interval.
-   * @throws IOException - If an I/O error occurs.
-   */
-  public static Map<String,String> getMRTraces(Configuration conf, 
-      Path tracesPath) throws IOException { 
-     Map <String, String> jobTraces = new HashMap <String, String>();
-     final FileSystem fs = FileSystem.getLocal(conf);
-     final FileStatus fstat[] = fs.listStatus(tracesPath);
-     for (FileStatus fst : fstat) { 
-        final String fileName = fst.getPath().getName();
-        if (fileName.endsWith("m.json.gz") 
-            || fileName.endsWith("h.json.gz") 
-            || fileName.endsWith("d.json.gz")) { 
-           jobTraces.put(fileName.substring(fileName.indexOf("_") + 1, 
-              fileName.indexOf(".json.gz")), fst.getPath().toString());
-        }
-     }
-     if (jobTraces.size() == 0) { 
-        LOG.error("No traces found in " + tracesPath.toString() + " path.");
-        throw new IOException("No traces found in " 
-                             + tracesPath.toString() + " path.");
-     }
-     return jobTraces;
-  }
-  
-  /**
-   * It lists all the MR trace paths irrespective of time.
-   * @param conf - cluster configuration.
-   * @param tracesPath - MR traces path
-   * @return - MR paths as a list.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public static List<String> listMRTraces(Configuration conf, 
-      Path tracesPath) throws IOException {
-     List<String> jobTraces = new ArrayList<String>();
-     final FileSystem fs = FileSystem.getLocal(conf);
-     final FileStatus fstat[] = fs.listStatus(tracesPath);
-     for (FileStatus fst : fstat) {
-        jobTraces.add(fst.getPath().toString());
-     }
-     if (jobTraces.size() == 0) {
-        LOG.error("No traces found in " + tracesPath.toString() + " path.");
-        throw new IOException("No traces found in " 
-                             + tracesPath.toString() + " path.");
-     }
-     return jobTraces;
-  }
-  
-  /**
-   * It lists all the MR trace paths irrespective of time.
-   * @param conf - cluster configuration.
-   * @param tracesPath - MR traces path
-   * @return - MR paths as a list.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public static List<String> listMRTraces(Configuration conf) 
-      throws IOException { 
-     return listMRTraces(conf, DEFAULT_TRACES_PATH);
-  }
-
-  /**
-   * Gives the list of MR traces for given time interval.
-   * The time interval should follow the below convention.
-   *   Syntax : &lt;numeric&gt;[m|h|d]
-   *   e.g : 10m or 1h or 2d etc.
-   * @param conf - cluster configuration
-   * @param timeInterval - trace time interval.
-   * @param tracesPath - MR traces Path.
-   * @return - MR paths as a list for a given time interval.
-   * @throws IOException - If an I/O error occurs.
-   */
-  public static List<String> listMRTracesByTime(Configuration conf, 
-      String timeInterval, Path tracesPath) throws IOException { 
-     List<String> jobTraces = new ArrayList<String>();
-     final FileSystem fs = FileSystem.getLocal(conf);
-     final FileStatus fstat[] = fs.listStatus(tracesPath);
-     for (FileStatus fst : fstat) { 
-        final String fileName = fst.getPath().getName();
-        if (fileName.indexOf(timeInterval) >= 0) { 
-           jobTraces.add(fst.getPath().toString());
-        }
-     }
-     return jobTraces;
-  }
-  
-  /**
-   * Gives the list of MR traces for a given time interval.
-   * The time interval should follow the convention below.
-   *   Syntax : &lt;numeric&gt;[m|h|d]
-   *   e.g. : 10m, 1h or 2d.
-   * @param conf - cluster configuration
-   * @param timeInterval - trace time interval.
-   * @return - MR paths as a list for a given time interval.
-   * @throws IOException - If an I/O error occurs.
-   */
-  public static List<String> listMRTracesByTime(Configuration conf, 
-      String timeInterval) throws IOException { 
-     return listMRTracesByTime(conf, timeInterval, DEFAULT_TRACES_PATH);
-  }
-}
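
For illustration, the trace-naming convention documented in the removed utility above can be sketched in a few lines of Java; the file name here is hypothetical, but the substring logic mirrors the deleted getMRTraces code:

    // Hypothetical trace file name following the <numeric>[m|h|d] convention.
    String fileName = "trace_10m.json.gz";
    // Extract the interval key between the first '_' and the ".json.gz" suffix.
    String key = fileName.substring(fileName.indexOf("_") + 1,
        fileName.indexOf(".json.gz"));   // yields "10m"

A file such as trace_10m.json.gz is therefore keyed as "10m", which is also the substring that the removed listMRTracesByTime matched against when filtering by time interval.
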
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz
deleted file mode 100644
index c145836..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz
deleted file mode 100644
index 7bf17a0..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz
deleted file mode 100644
index a72e41f..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz
deleted file mode 100644
index 4e5615f..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz
deleted file mode 100644
index faba98b..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz
deleted file mode 100644
index 5adbf43..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz
deleted file mode 100644
index cdff79a..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case1.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case1.json.gz
deleted file mode 100644
index 2117738..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case1.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case2.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case2.json.gz
deleted file mode 100644
index b230610..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case2.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz
deleted file mode 100644
index 7b93b07..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz
deleted file mode 100644
index 7bdd313..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz
deleted file mode 100644
index 04fd705..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz
deleted file mode 100644
index 74742fc..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz
deleted file mode 100644
index c178761..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz
deleted file mode 100644
index 9a53ad2..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz
deleted file mode 100644
index 43a181a..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz
deleted file mode 100644
index fa3d791..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz
deleted file mode 100644
index ee009ed..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz
deleted file mode 100644
index c11a148..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz
deleted file mode 100644
index aa17252..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case3.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case3.json.gz
deleted file mode 100644
index 39e90d2..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case3.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case4.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case4.json.gz
deleted file mode 100644
index 229d8d3..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case4.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case1.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case1.json.gz
deleted file mode 100644
index 5f7fcab..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case1.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case2.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case2.json.gz
deleted file mode 100644
index d0ea21e..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case2.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_10m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_10m.json.gz
deleted file mode 100644
index 2be6f37..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_10m.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_12m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_12m.json.gz
deleted file mode 100644
index 7850026..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_12m.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_1m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_1m.json.gz
deleted file mode 100644
index 21bff55..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_1m.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_3m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_3m.json.gz
deleted file mode 100644
index a27241e..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_3m.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_5m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_5m.json.gz
deleted file mode 100644
index 441ca3a..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_5m.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_7m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_7m.json.gz
deleted file mode 100644
index 4aab5a1..0000000
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_7m.json.gz
+++ /dev/null
Binary files differ
diff --git a/hadoop-mapreduce-project/src/docs/src/documentation/content/xdocs/gridmix.xml b/hadoop-mapreduce-project/src/docs/src/documentation/content/xdocs/gridmix.xml
index 410ca40..5e5d0f4 100644
--- a/hadoop-mapreduce-project/src/docs/src/documentation/content/xdocs/gridmix.xml
+++ b/hadoop-mapreduce-project/src/docs/src/documentation/content/xdocs/gridmix.xml
@@ -538,7 +538,8 @@
       </source>
       <p>For backward compatibility reasons, each line of users-list file can
       contain username followed by groupnames in the form username[,group]*.
-      The groupnames will be ignored by Gridmix.
+      The groupnames will be ignored by Gridmix. Empty lines will also be 
+      ignored.
       </p>
     </section>
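
For illustration only, a users-list file following the convention described above might look like the following (the usernames and group names are made up); the group names after each comma and the empty line are both ignored by Gridmix:

    user1,groupA,groupB
    user2

    user3
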
 
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
index 5551bd7..94a584a 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
+++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
@@ -289,7 +289,7 @@
     // this token is keyed by hostname:port key.
     String fs_addr = 
       SecurityUtil.buildDTServiceName(p1.toUri(), NameNode.DEFAULT_PORT);
-    Token<DelegationTokenIdentifier> nnt = TokenCache.getDelegationToken(
+    Token<DelegationTokenIdentifier> nnt = (Token<DelegationTokenIdentifier>)TokenCache.getDelegationToken(
         credentials, fs_addr);
     System.out.println("dt for " + p1 + "(" + fs_addr + ")" + " = " +  nnt);
     assertNotNull("Token for nn is null", nnt);
diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JTProtocolAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JTProtocolAspect.aj
deleted file mode 100644
index 05b6135..0000000
--- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JTProtocolAspect.aj
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.TaskID;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-import org.apache.hadoop.mapreduce.test.system.TTInfo;
-import org.apache.hadoop.mapreduce.test.system.TaskInfo;
-
-/**
- * Aspect which injects the basic protocol functionality that is to be
- * implemented by all the services which implement {@link ClientProtocol}.
- * 
- * The aspect also injects a default implementation of {@link JTProtocol}.
- */
-
-public aspect JTProtocolAspect {
-
-  // Make the ClientProtocol extend the JTProtocol
-  declare parents : ClientProtocol extends JTProtocol;
-
-  /*
-   * Start of default implementation of the methods in JTProtocol
-   */
-
-  public Configuration JTProtocol.getDaemonConf() throws IOException {
-    return null;
-  }
-
-  public JobInfo JTProtocol.getJobInfo(JobID jobID) throws IOException {
-    return null;
-  }
-
-  public TaskInfo JTProtocol.getTaskInfo(TaskID taskID) throws IOException {
-    return null;
-  }
-
-  public TTInfo JTProtocol.getTTInfo(String trackerName) throws IOException {
-    return null;
-  }
-
-  public JobInfo[] JTProtocol.getAllJobInfo() throws IOException {
-    return null;
-  }
-
-  public TaskInfo[] JTProtocol.getTaskInfo(JobID jobID) throws IOException {
-    return null;
-  }
-
-  public TTInfo[] JTProtocol.getAllTTInfo() throws IOException {
-    return null;
-  }
-  
-  public boolean JTProtocol.isJobRetired(JobID jobID) throws IOException {
-    return false;
-  }
-  
-  public String JTProtocol.getJobHistoryLocationForRetiredJob(JobID jobID) throws IOException {
-    return "";
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobClientAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobClientAspect.aj
deleted file mode 100644
index 49df8a2..0000000
--- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobClientAspect.aj
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
-
-public privileged aspect JobClientAspect {
-
-  public ClientProtocol JobClient.getProtocol() {
-    return cluster.getClientProtocol();
-  }
-  
-  public void JobClient.killJob(JobID id) throws IOException,InterruptedException {
-    cluster.getClientProtocol().killJob(
-        org.apache.hadoop.mapred.JobID.downgrade(id));
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobInProgressAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobInProgressAspect.aj
deleted file mode 100644
index ecfd8e9..0000000
--- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobInProgressAspect.aj
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.jobhistory.JobHistory;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-
-/**
- * Aspect that adds a utility method to JobInProgress to ease the
- * construction of the JobInfo object.
- */
-privileged aspect JobInProgressAspect {
-
-  /**
-   * Returns a read only view of the JobInProgress object which is used by the
-   * client.
-   * 
-   * @return JobInfo of the current JobInProgress object
-   */
-  public JobInfo JobInProgress.getJobInfo() {
-    String historyLoc = getHistoryPath();
-    boolean isHistoryFileCopied =
-        this.status.getHistoryFile() == null ? false : true;
-    if (tasksInited.get()) {
-      return new JobInfoImpl(
-          this.getJobID(), this.isSetupLaunched(), this.isSetupFinished(), this
-              .isCleanupLaunched(), this.runningMaps(), this.runningReduces(),
-          this.pendingMaps(), this.pendingReduces(), this.finishedMaps(), this
-              .finishedReduces(), this.getStatus(), historyLoc, this
-              .getBlackListedTrackers(), false, this.numMapTasks,
-          this.numReduceTasks, isHistoryFileCopied);
-    } else {
-      return new JobInfoImpl(
-          this.getJobID(), false, false, false, 0, 0, this.pendingMaps(), this
-              .pendingReduces(), this.finishedMaps(), this.finishedReduces(),
-          this.getStatus(), historyLoc, this.getBlackListedTrackers(), this
-              .isComplete(), this.numMapTasks, this.numReduceTasks, false);
-    }
-  }
-
-  private String JobInProgress.getHistoryPath() {
-    String historyLoc = "";
-    if (this.isComplete()) {
-      historyLoc = this.getStatus().getHistoryFile();
-    } else {
-      Path jobHistoryDirectory = this.jobHistory.getJobHistoryLocation();
-      Path historypath =
-          JobHistory.getJobHistoryFile(
-              jobHistoryDirectory, this.getJobID(), this.profile.getUser());
-      historyLoc = historypath.toString();
-    }
-    return historyLoc;
-  }
-
-}
diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj
deleted file mode 100644
index dc005d7..0000000
--- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj
+++ /dev/null
@@ -1,221 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.TaskID;
-import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-import org.apache.hadoop.mapreduce.test.system.TTInfo;
-import org.apache.hadoop.mapreduce.test.system.TaskInfo;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.system.DaemonProtocol;
-
-/**
- * Aspect class which injects the code for {@link JobTracker} class.
- * 
- */
-public privileged aspect JobTrackerAspect {
-
-
-  public Configuration JobTracker.getDaemonConf() throws IOException {
-    return conf;
-  }
-  /**
-   * Method to get the read only view of the job and its associated information.
-   * 
-   * @param jobID
-   *          id of the job for which information is required.
-   * @return JobInfo of the job requested
-   * @throws IOException
-   */
-  public JobInfo JobTracker.getJobInfo(JobID jobID) throws IOException {
-    JobInProgress jip = jobs.get(org.apache.hadoop.mapred.JobID
-        .downgrade(jobID));
-    if (jip == null) {
-      LOG.warn("No job present for : " + jobID);
-      return null;
-    }
-    JobInfo info;
-    synchronized (jip) {
-      info = jip.getJobInfo();
-    }
-    return info;
-  }
-
-  /**
-   * Method to get the read only view of the task and its associated
-   * information.
-   * 
-   * @param taskID id of the task for which information is required.
-   * @return TaskInfo of the requested task, or null if the task is not found.
-   * @throws IOException
-   */
-  public TaskInfo JobTracker.getTaskInfo(TaskID taskID) throws IOException {
-    TaskInProgress tip = getTip(org.apache.hadoop.mapred.TaskID
-        .downgrade(taskID));
-
-    if (tip == null) {
-      LOG.warn("No task present for : " + taskID);
-      return null;
-    }
-    return getTaskInfo(tip);
-  }
-
-  public TTInfo JobTracker.getTTInfo(String trackerName) throws IOException {
-    org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker tt = taskTrackers
-        .get(trackerName);
-    if (tt == null) {
-      LOG.warn("No task tracker with name : " + trackerName + " found");
-      return null;
-    }
-    TaskTrackerStatus status = tt.getStatus();
-    TTInfo info = new TTInfoImpl(status.trackerName, status);
-    return info;
-  }
-
-  // XXX The two methods below don't reuse getJobInfo and getTaskInfo as
-  // there is a possibility that a retire-job operation can run and remove
-  // the job from JT memory
-  // during processing of the RPC call.
-  public JobInfo[] JobTracker.getAllJobInfo() throws IOException {
-    List<JobInfo> infoList = new ArrayList<JobInfo>();
-    synchronized (jobs) {
-      for (JobInProgress jip : jobs.values()) {
-        JobInfo info = jip.getJobInfo();
-        infoList.add(info);
-      }
-    }
-    return (JobInfo[]) infoList.toArray(new JobInfo[infoList.size()]);
-  }
-
-  public TaskInfo[] JobTracker.getTaskInfo(JobID jobID) throws IOException {
-    JobInProgress jip = jobs.get(org.apache.hadoop.mapred.JobID
-        .downgrade(jobID));
-    if (jip == null) {
-      LOG.warn("Unable to find job : " + jobID);
-      return null;
-    }
-    List<TaskInfo> infoList = new ArrayList<TaskInfo>();
-    synchronized (jip) {
-      for (TaskInProgress tip : jip.setup) {
-        infoList.add(getTaskInfo(tip));
-      }
-      for (TaskInProgress tip : jip.maps) {
-        infoList.add(getTaskInfo(tip));
-      }
-      for (TaskInProgress tip : jip.reduces) {
-        infoList.add(getTaskInfo(tip));
-      }
-      for (TaskInProgress tip : jip.cleanup) {
-        infoList.add(getTaskInfo(tip));
-      }
-    }
-    return (TaskInfo[]) infoList.toArray(new TaskInfo[infoList.size()]);
-  }
-
-  public TTInfo[] JobTracker.getAllTTInfo() throws IOException {
-    List<TTInfo> infoList = new ArrayList<TTInfo>();
-    synchronized (taskTrackers) {
-      for (TaskTracker tt : taskTrackers.values()) {
-        TaskTrackerStatus status = tt.getStatus();
-        TTInfo info = new TTInfoImpl(status.trackerName, status);
-        infoList.add(info);
-      }
-    }
-    return (TTInfo[]) infoList.toArray(new TTInfo[infoList.size()]);
-  }
-  
-  public boolean JobTracker.isJobRetired(JobID id) throws IOException {
-    return retireJobs.get(
-        org.apache.hadoop.mapred.JobID.downgrade(id))!=null?true:false;
-  }
-
-  public String JobTracker.getJobHistoryLocationForRetiredJob(
-      JobID id) throws IOException {
-    String historyFile = this.getJobStatus(id).getHistoryFile();
-    if(historyFile == null) {
-      throw new IOException("The retired job information for the job : " 
-          + id +" is not found");
-    } else {
-      return historyFile;
-    }
-  }
-  pointcut getVersionAspect(String protocol, long clientVersion) : 
-    execution(public long JobTracker.getProtocolVersion(String , 
-      long) throws IOException) && args(protocol, clientVersion);
-
-  long around(String protocol, long clientVersion) :  
-    getVersionAspect(protocol, clientVersion) {
-    if (protocol.equals(DaemonProtocol.class.getName())) {
-      return DaemonProtocol.versionID;
-    } else if (protocol.equals(JTProtocol.class.getName())) {
-      return JTProtocol.versionID;
-    } else {
-      return proceed(protocol, clientVersion);
-    }
-  }
-
-  /**
-   * Point cut which monitors for the start of the jobtracker and sets the right
-   * value if the jobtracker is started.
-   */
-  pointcut jtConstructorPointCut() : 
-        call(JobTracker.new(..));
-
-  after() returning (JobTracker tracker): jtConstructorPointCut() {
-    try {
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      tracker.setUser(ugi.getShortUserName());
-    } catch (IOException e) {
-      tracker.LOG.warn("Unable to get the user information for the "
-          + "Jobtracker");
-    }
-    tracker.setReady(true);
-  }
-  
-  private TaskInfo JobTracker.getTaskInfo(TaskInProgress tip) {
-    TaskStatus[] status = tip.getTaskStatuses();
-    if (status == null) {
-      if (tip.isMapTask()) {
-        status = new MapTaskStatus[]{};
-      }
-      else {
-        status = new ReduceTaskStatus[]{};
-      }
-    }
-    String[] trackers =
-        (String[]) (tip.getActiveTasks().values()).toArray(new String[tip
-            .getActiveTasks().values().size()]);
-    TaskInfo info =
-        new TaskInfoImpl(tip.getTIPId(), tip.getProgress(), tip
-            .getActiveTasks().size(), tip.numKilledTasks(), tip
-            .numTaskFailures(), status, (tip.isJobSetupTask() || tip
-            .isJobCleanupTask()), trackers);
-    return info;
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj
deleted file mode 100644
index 482e7d4..0000000
--- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred;
-
-import java.util.ArrayList;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapreduce.test.system.TTProtocol;
-import org.apache.hadoop.security.authorize.Service;
-import org.apache.hadoop.test.system.DaemonProtocol;
-
-/**
- * This aspect adds two MR specific Herriot protocols to the list of
- * 'authorized' Herriot protocols. Protocol descriptors, e.g.
- * 'security.tt.protocol.acl', have to be added to <code>hadoop-policy.xml</code>
- * if it is present.
- */
-public privileged aspect MapReducePolicyProviderAspect {
-  private static final Log LOG = LogFactory
-      .getLog(MapReducePolicyProviderAspect.class);
-  ArrayList<Service> herriotMRServices = null;
-
-  pointcut updateMRServices() :
-    execution (public Service[] MapReducePolicyProvider.getServices());
-
-  Service[] around() : updateMRServices () {
-    herriotMRServices = new ArrayList<Service>();
-    for (Service s : MapReducePolicyProvider.mapReduceServices) {
-      LOG.debug("Copying configured protocol to "
-          + s.getProtocol().getCanonicalName());
-      herriotMRServices.add(s);
-    }
-    herriotMRServices.add(new Service("security.daemon.protocol.acl",
-        DaemonProtocol.class));
-    herriotMRServices.add(new Service("security.tt.protocol.acl",
-        TTProtocol.class));
-    final Service[] retArray = herriotMRServices
-        .toArray(new Service[herriotMRServices.size()]);
-    LOG.debug("Number of configured protocols to return: " + retArray.length);
-    return retArray;
-  }
-}
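
For reference, an entry for one of these protocol descriptors in hadoop-policy.xml would, assuming the standard Hadoop service-ACL property format, look roughly like the snippet below; the "*" value (allow all users) is only a placeholder and is not prescribed by the removed aspect:

    <property>
      <name>security.tt.protocol.acl</name>
      <value>*</value>
    </property>
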
diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj
deleted file mode 100644
index 8c3326b..0000000
--- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.mapred.Task.TaskReporter;
-import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
-import org.apache.hadoop.test.system.ControlAction;
-import org.apache.hadoop.test.system.DaemonProtocol;
-import org.apache.hadoop.mapreduce.test.system.TTProtocol;
-
-public privileged aspect TaskAspect {
-
-  private static final Log LOG = LogFactory.getLog(TaskAspect.class);
-  
-  private Object waitObject = new Object();
-  private AtomicBoolean isWaitingForSignal = new AtomicBoolean(false);
-  
-  private DaemonProtocol daemonProxy;
-
-  pointcut taskDoneIntercept(Task task) : execution(
-      public void Task.done(..)) && target(task);
-  
-  void around(Task task) : taskDoneIntercept(task) {
-    if(task.isJobCleanupTask() || task.isJobSetupTask() || task.isTaskCleanupTask()) {
-      proceed(task);
-      return;
-    }
-    Configuration conf = task.getConf();
-    boolean controlEnabled = FinishTaskControlAction.isControlActionEnabled(conf);
-    if(controlEnabled) {
-      LOG.info("Task control enabled, waiting till client sends signal to " +
-      "complete");
-      try {
-        synchronized (waitObject) {
-          isWaitingForSignal.set(true);
-          waitObject.wait();
-        }
-      } catch (InterruptedException e) {
-      }
-    }
-    proceed(task);
-    return;
-  }
-  
-  pointcut taskStatusUpdate(TaskReporter reporter, TaskAttemptID id) : 
-    call(public boolean TaskUmbilicalProtocol.ping(TaskAttemptID))
-          && this(reporter) && args(id);
-  
-  after(TaskReporter reporter, TaskAttemptID id) throws IOException : 
-    taskStatusUpdate(reporter, id)  {
-    synchronized (waitObject) {
-      if(isWaitingForSignal.get()) {
-        ControlAction[] actions = daemonProxy.getActions(
-            id.getTaskID());
-        if(actions.length == 0) {
-          return;
-        }
-        boolean shouldProceed = false;
-        for(ControlAction action : actions) {
-          if (action instanceof FinishTaskControlAction) {
-            LOG.info("Recv : Control task action to finish task id: " 
-                + action.getTarget());
-            shouldProceed = true;
-            daemonProxy.removeAction(action);
-            LOG.info("Removed the control action from TaskTracker");
-            break;
-          }
-        }
-        if(shouldProceed) {
-          LOG.info("Notifying the task to completion");
-          waitObject.notify();
-        }
-      }
-    }
-  }
-  
-  
-  pointcut rpcInterceptor(Class k, long version,InetSocketAddress addr, 
-      Configuration conf) : call(
-          public static * RPC.getProxy(Class, long ,InetSocketAddress,
-              Configuration)) && args(k, version,addr, conf) && 
-              within(org.apache.hadoop.mapred.Child) ;
-  
-  after(Class k, long version, InetSocketAddress addr, Configuration conf) 
-    throws IOException : rpcInterceptor(k, version, addr, conf) {
-    daemonProxy = 
-      (TTProtocol) RPC.getProxy(
-          TTProtocol.class, TTProtocol.versionID, addr, conf);
-  }
-  
-}
diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj
deleted file mode 100644
index 51bcdb7..0000000
--- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj
+++ /dev/null
@@ -1,155 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.ArrayList;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.test.system.TTProtocol;
-import org.apache.hadoop.mapreduce.test.system.TTTaskInfo;
-import org.apache.hadoop.mapred.TTTaskInfoImpl.MapTTTaskInfo;
-import org.apache.hadoop.mapred.TTTaskInfoImpl.ReduceTTTaskInfo;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.system.DaemonProtocol;
-import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
-
-public privileged aspect TaskTrackerAspect {
-
-  declare parents : TaskTracker implements TTProtocol;
-
-  // Add a last sent status field to the TaskTracker class.
-  TaskTrackerStatus TaskTracker.lastSentStatus = null;
-  public static String TaskTracker.TASKJARDIR = TaskTracker.JARSDIR;
-
-  public synchronized TaskTrackerStatus TaskTracker.getStatus()
-      throws IOException {
-    return lastSentStatus;
-  }
-
-  public Configuration TaskTracker.getDaemonConf() throws IOException {
-    return fConf;
-  }
-
-  public TTTaskInfo[] TaskTracker.getTasks() throws IOException {
-    List<TTTaskInfo> infoList = new ArrayList<TTTaskInfo>();
-    synchronized (tasks) {
-      for (TaskInProgress tip : tasks.values()) {
-        TTTaskInfo info = getTTTaskInfo(tip);
-        infoList.add(info);
-      }
-    }
-    return (TTTaskInfo[]) infoList.toArray(new TTTaskInfo[infoList.size()]);
-  }
-
-  public TTTaskInfo TaskTracker.getTask(org.apache.hadoop.mapreduce.TaskID id) 
-      throws IOException {
-    TaskID old = org.apache.hadoop.mapred.TaskID.downgrade(id);
-    synchronized (tasks) {
-      for(TaskAttemptID ta : tasks.keySet()) {
-        if(old.equals(ta.getTaskID())) {
-          return getTTTaskInfo(tasks.get(ta));
-        }
-      }
-    }
-    return null;
-  }
-
-  private TTTaskInfo TaskTracker.getTTTaskInfo(TaskInProgress tip) {
-    TTTaskInfo info;
-    if (tip.task.isMapTask()) {
-      info = new MapTTTaskInfo(tip.slotTaken, tip.wasKilled,
-          (MapTaskStatus) tip.getStatus(), tip.getJobConf(), tip.getTask()
-              .getUser(), tip.getTask().isTaskCleanupTask(), getPid(tip.getTask().getTaskID()));
-    } else {
-      info = new ReduceTTTaskInfo(tip.slotTaken, tip.wasKilled,
-          (ReduceTaskStatus) tip.getStatus(), tip.getJobConf(), tip.getTask()
-              .getUser(), tip.getTask().isTaskCleanupTask(),getPid(tip.getTask().getTaskID()));
-    }
-    return info;
-  }
-
-  before(TaskTrackerStatus newStatus, TaskTracker tracker) : 
-    set(TaskTrackerStatus TaskTracker.status) 
-    && args(newStatus) && this(tracker) {
-    if (newStatus == null) {
-      tracker.lastSentStatus = tracker.status;
-    }
-  }
-
-  pointcut ttConstructorPointCut(JobConf conf) : 
-    call(TaskTracker.new(JobConf)) 
-    && args(conf);
-
-  after(JobConf conf) returning (TaskTracker tracker): 
-    ttConstructorPointCut(conf) {
-    try {
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      tracker.setUser(ugi.getShortUserName());
-    } catch (IOException e) {
-      tracker.LOG.warn("Unable to get the user information for the " +
-          "Jobtracker");
-    }
-    tracker.setReady(true);
-  }
-  
-  pointcut getVersionAspect(String protocol, long clientVersion) : 
-    execution(public long TaskTracker.getProtocolVersion(String , 
-      long) throws IOException) && args(protocol, clientVersion);
-
-  long around(String protocol, long clientVersion) :  
-    getVersionAspect(protocol, clientVersion) {
-    if(protocol.equals(DaemonProtocol.class.getName())) {
-      return DaemonProtocol.versionID;
-    } else if(protocol.equals(TTProtocol.class.getName())) {
-      return TTProtocol.versionID;
-    } else {
-      return proceed(protocol, clientVersion);
-    }
-  }  
-
-  public boolean TaskTracker.isProcessTreeAlive(String pid) throws IOException {
-    // Command to be executed is as follows :
-    // ps -o pid,ppid,sid,command -e | grep -v ps | grep -v grep | grep
-    // "$pid"
-    String checkerCommand =
-        getDaemonConf().get(
-            "test.system.processgroup_checker_command",
-            "ps -o pid,ppid,sid,command -e "
-                + "| grep -v ps | grep -v grep | grep \"$");
-    String[] command =
-        new String[] { "bash", "-c", checkerCommand + pid + "\"" };
-    ShellCommandExecutor shexec = new ShellCommandExecutor(command);
-    try {
-      shexec.execute();
-    } catch (Shell.ExitCodeException e) {
-      TaskTracker.LOG
-          .info("The process tree grep threw a exitcode exception pointing "
-              + "to process tree not being alive.");
-      return false;
-    }
-    TaskTracker.LOG.info("The task grep command is : "
-        + shexec.toString() + " the output from command is : "
-        + shexec.getOutput());
-    return true;
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapreduce/ClusterAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapreduce/ClusterAspect.aj
deleted file mode 100644
index 04b1537..0000000
--- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapreduce/ClusterAspect.aj
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapreduce;
-
-import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
-
-public privileged aspect ClusterAspect {
-
-  public ClientProtocol Cluster.getClientProtocol() {
-    return client;
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/conf/system-test-mapred.xml b/hadoop-mapreduce-project/src/test/system/conf/system-test-mapred.xml
deleted file mode 100644
index ff43e57..0000000
--- a/hadoop-mapreduce-project/src/test/system/conf/system-test-mapred.xml
+++ /dev/null
@@ -1,133 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-<!-- Mandatory properties that are to be set and uncommented before running the tests -->
-
-<property>
-  <name>test.system.hdrc.hadoophome</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/share/hadoop-current</value>
-  <description> This is the path to the home directory of the hadoop deployment.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.hadoopconfdir</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop</value>
-  <description> This is the path to the configuration directory of the hadoop
-  cluster that is deployed.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.tt.hostfile</name>
-  <value>slaves.localcopy.txt</value>
-  <description> File name containing the hostnames where the TaskTrackers are running.
-  </description>
-</property>
-
-<property>
-  <name>test.system.mr.clusterprocess.impl.class</name>
-  <value>org.apache.hadoop.mapreduce.test.system.MRCluster$MRProcessManager</value>
-  <description>
-  Cluster process manager for the Mapreduce subsystem of the cluster. The value
-  org.apache.hadoop.mapreduce.test.system.MRCluster$MultiMRProcessManager can
-  be used to enable multi user support.
-  </description>
-</property>
-
-<property>
-   <name>test.system.hdrc.deployed.scripts.dir</name>
-   <value>./src/test/system/scripts</value>
-   <description>
-     This directory hosts the scripts in the deployed location where
-     the system test client runs.
-   </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.hadoopnewconfdir</name>
-  <value>$(TO_DO_GLOBAL_TMP_DIR)/newconf</value>
-  <description>
-  The directory to which the new config files will be copied on all
-  the clusters.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.suspend.cmd</name>
-  <value>kill -SIGSTOP</value>
-  <description>
-    Command for suspending the given process.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.resume.cmd</name>
-  <value>kill -SIGCONT</value>
-  <description>
-  Command for resuming the given suspended process.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.hadoop.local.confdir</name>
-  <value>$(TO_DO_GLOBAL_TMP_DIR)/localconf</value>
-  <description>
-    A local directory where a new config file is placed before
-    being pushed into the new config location on the cluster.
-  </description>
-</property>
-
-<!-- Mandatory keys to be set for the multi user support to be enabled.  -->
-
- <property>
-  <name>test.system.mr.clusterprocess.impl.class</name>
-  <value>org.apache.hadoop.mapreduce.test.system.MRCluster$MultiMRProcessManager</value>
-  <description>
-    Enables the multi-user based cluster process manager.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.multi-user.list.path</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop/proxyusers</value>
-  <description>
-  Multi user list for creating the proxy users.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.multi-user.binary.path</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop/runAs</value>
-  <description>
-    Local file system path on the gateway to the cluster-controller binary, including the binary name.
-    To build the binary the following commands need to be executed:
-     % ant run-as -Drun-as.hadoop.home.dir=(HADOOP_PREFIX of setup cluster)
-     % cp build-fi/system/c++-build/runAs test.system.hdrc.multi-user.binary.path
-    The location of the binary is an important security precaution.
-    The binary should be owned by root, and the test user group permission
-    should be set in such a way that the binary can be executed by the test user group. Example usage would be:
-     % sudo chown root binary
-     % sudo chmod 6511 binary
-    Change the permissions appropriately to make it more secure.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.multi-user.managinguser.jobtracker</name>
-  <value>*</value>
-  <description>
-    User value for managing the particular daemon. Please note that these users should
-    also be present on the gateways. An example configuration for the above would be
-    key name = test.system.hdrc.multi-user.managinguser.jobtracker
-    key value = guest
-    Please note that the daemon names are all lower case, corresponding to the hadoop-daemon.sh command.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.multi-user.managinguser.tasktracker</name>
-  <value>*</value>
-</property>
- 
-</configuration>
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/JobInfoImpl.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/JobInfoImpl.java
deleted file mode 100644
index 28b2e72..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/JobInfoImpl.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-
-/**
- * Concrete implementation of the JobInfo interface which is exposed to the
- * clients.
- * Look at {@link JobInfo} for further details.
- */
-class JobInfoImpl implements JobInfo {
-
-  private List<String> blackListedTracker;
-  private String historyUrl;
-  private JobID id;
-  private boolean setupLaunched;
-  private boolean setupFinished;
-  private boolean cleanupLaunched;
-  private JobStatus status;
-  private int runningMaps;
-  private int runningReduces;
-  private int waitingMaps;
-  private int waitingReduces;
-  private int finishedMaps;
-  private int finishedReduces;
-  private int numMaps;
-  private int numReduces;
-  private boolean historyCopied;
-
-  public JobInfoImpl() {
-    id = new JobID();
-    status = new JobStatus();
-    blackListedTracker = new LinkedList<String>();
-    historyUrl = "";
-  }
-  
-  public JobInfoImpl(
-      JobID id, boolean setupLaunched, boolean setupFinished,
-      boolean cleanupLaunched, int runningMaps, int runningReduces,
-      int waitingMaps, int waitingReduces, int finishedMaps,
-      int finishedReduces, JobStatus status, String historyUrl,
-      List<String> blackListedTracker, boolean isComplete, int numMaps,
-      int numReduces, boolean historyCopied) {
-    super();
-    this.blackListedTracker = blackListedTracker;
-    this.historyUrl = historyUrl;
-    this.id = id;
-    this.setupLaunched = setupLaunched;
-    this.setupFinished = setupFinished;
-    this.cleanupLaunched = cleanupLaunched;
-    this.status = status;
-    this.runningMaps = runningMaps;
-    this.runningReduces = runningReduces;
-    this.waitingMaps = waitingMaps;
-    this.waitingReduces = waitingReduces;
-    this.finishedMaps = finishedMaps;
-    this.finishedReduces = finishedReduces;
-    this.numMaps = numMaps;
-    this.numReduces = numReduces;
-    this.historyCopied = historyCopied;
-  }
-
-  @Override
-  public List<String> getBlackListedTrackers() {
-    return blackListedTracker;
-  }
-
-  @Override
-  public String getHistoryUrl() {
-    return historyUrl;
-  }
-
-  @Override
-  public JobID getID() {
-    return id;
-  }
-
-  @Override
-  public JobStatus getStatus() {
-    return status;
-  }
-
-  @Override
-  public boolean isCleanupLaunched() {
-    return cleanupLaunched;
-  }
-
-  @Override
-  public boolean isSetupLaunched() {
-    return setupLaunched;
-  }
-
-  @Override
-  public boolean isSetupFinished() {
-    return setupFinished;
-  }
-
-  @Override
-  public int runningMaps() {
-    return runningMaps;
-  }
-
-  @Override
-  public int runningReduces() {
-    return runningReduces;
-  }
-
-  @Override
-  public int waitingMaps() {
-    return waitingMaps;
-  }
-
-  @Override
-  public int waitingReduces() {
-    return waitingReduces;
-  }
- 
-  @Override
-  public int finishedMaps() {
-    return finishedMaps;
-  }
-
-  @Override
-  public int finishedReduces() {
-    return finishedReduces;
-  }
-  
-  @Override
-  public int numMaps() {
-    return numMaps;
-  }
-  
-  @Override
-  public int numReduces() {
-    return numReduces;
-  }
-  
-  @Override
-  public boolean isHistoryFileCopied() {
-    return historyCopied;
-  }
-  
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    id.readFields(in);
-    setupLaunched = in.readBoolean();
-    setupFinished = in.readBoolean();
-    cleanupLaunched = in.readBoolean();
-    status.readFields(in);
-    runningMaps = in.readInt();
-    runningReduces = in.readInt();
-    waitingMaps = in.readInt();
-    waitingReduces = in.readInt();
-    historyUrl = in.readUTF();
-    int size = in.readInt();
-    for (int i = 0; i < size; i++) {
-      blackListedTracker.add(in.readUTF());
-    }
-    finishedMaps = in.readInt();
-    finishedReduces = in.readInt();
-    numMaps = in.readInt();
-    numReduces = in.readInt();
-    historyCopied = in.readBoolean();
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    id.write(out);
-    out.writeBoolean(setupLaunched);
-    out.writeBoolean(setupFinished);
-    out.writeBoolean(cleanupLaunched);
-    status.write(out);
-    out.writeInt(runningMaps);
-    out.writeInt(runningReduces);
-    out.writeInt(waitingMaps);
-    out.writeInt(waitingReduces);
-    out.writeUTF(historyUrl);
-    out.writeInt(blackListedTracker.size());
-    for (String str : blackListedTracker) {
-      out.writeUTF(str);
-    }
-    out.writeInt(finishedMaps);
-    out.writeInt(finishedReduces);
-    out.writeInt(numMaps);
-    out.writeInt(numReduces);
-    out.writeBoolean(historyCopied);
-  }
-
-
-}
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTInfoImpl.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTInfoImpl.java
deleted file mode 100644
index d17e171..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTInfoImpl.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.mapred.TaskTrackerStatus;
-import org.apache.hadoop.mapreduce.test.system.TTInfo;
-
-/**
- * Concrete implementation of the TaskTracker information which is passed to 
- * the client from JobTracker.
- * Look at {@link TTInfo}
- */
-
-class TTInfoImpl implements TTInfo {
-
-  private String taskTrackerName;
-  private TaskTrackerStatus status;
-
-  public TTInfoImpl() {
-    taskTrackerName = "";
-    status = new TaskTrackerStatus();
-  }
-  
-  public TTInfoImpl(String taskTrackerName, TaskTrackerStatus status) {
-    super();
-    this.taskTrackerName = taskTrackerName;
-    this.status = status;
-  }
-
-  @Override
-  public String getName() {
-    return taskTrackerName;
-  }
-
-  @Override
-  public TaskTrackerStatus getStatus() {
-    return status;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    taskTrackerName = in.readUTF();
-    status.readFields(in);
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeUTF(taskTrackerName);
-    status.write(out);
-  }
-
-}
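Note: Writables like the deleted TTInfoImpl are commonly exercised by round-tripping them through Hadoop's in-memory buffers. A hedged sketch, reusing the hypothetical ExampleInfo above:

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

// Illustrative round trip of a Writable; ExampleInfo is the hypothetical class sketched earlier.
class WritableRoundTripExample {
  static ExampleInfo roundTrip(ExampleInfo original) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);                      // serialize into an in-memory buffer

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength()); // read back exactly the bytes written
    ExampleInfo copy = new ExampleInfo();
    copy.readFields(in);
    return copy;
  }
}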
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java
deleted file mode 100644
index ed279de..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.test.system.TTTaskInfo;
-/**
- * Abstract class which passes the Task view of the TaskTracker to the client.
- * See {@link TTInfoImpl} for further details.
- *
- */
-abstract class TTTaskInfoImpl implements TTTaskInfo {
-
-  private boolean slotTaken;
-  private boolean wasKilled;
-  TaskStatus status;
-  Configuration conf;
-  String user;
-  boolean isTaskCleanupTask;
-  private String pid;
-
-  public TTTaskInfoImpl() {
-  }
-
-  public TTTaskInfoImpl(boolean slotTaken, boolean wasKilled,
-      TaskStatus status, Configuration conf, String user,
-      boolean isTaskCleanupTask, String pid) {
-    super();
-    this.slotTaken = slotTaken;
-    this.wasKilled = wasKilled;
-    this.status = status;
-    this.conf = conf;
-    this.user = user;
-    this.isTaskCleanupTask = isTaskCleanupTask;
-    this.pid = pid;
-  }
-
-  @Override
-  public boolean slotTaken() {
-    return slotTaken;
-  }
-
-  @Override
-  public boolean wasKilled() {
-    return wasKilled;
-  }
-
-  @Override
-  public abstract TaskStatus getTaskStatus();
-
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
-  
-  @Override
-  public String getUser() {
-    return user;
-  }
-  
-  @Override
-  public boolean isTaskCleanupTask() {
-    return isTaskCleanupTask;
-  }
-  
-  @Override
-  public String getPid() {
-    return pid;
-  }
-  
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    slotTaken = in.readBoolean();
-    wasKilled = in.readBoolean();
-    conf = new Configuration();
-    conf.readFields(in);
-    user = in.readUTF();
-    isTaskCleanupTask = in.readBoolean();
-    pid = in.readUTF();
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeBoolean(slotTaken);
-    out.writeBoolean(wasKilled);
-    conf.write(out);
-    out.writeUTF(user);
-    out.writeBoolean(isTaskCleanupTask);
-    if (pid != null) {
-      out.writeUTF(pid);
-    } else {
-      out.writeUTF("");
-    }
-    status.write(out);
-  }
-
-  static class MapTTTaskInfo extends TTTaskInfoImpl {
-
-    public MapTTTaskInfo() {
-      super();
-    }
-
-    public MapTTTaskInfo(boolean slotTaken, boolean wasKilled,
-        MapTaskStatus status, Configuration conf, String user,
-        boolean isTaskCleanup,String pid) {
-      super(slotTaken, wasKilled, status, conf, user, isTaskCleanup, pid);
-    }
-
-    @Override
-    public TaskStatus getTaskStatus() {
-      return status;
-    }
-    
-    public void readFields(DataInput in) throws IOException {
-      super.readFields(in);
-      status = new MapTaskStatus();
-      status.readFields(in);
-    }
-  }
-
-  static class ReduceTTTaskInfo extends TTTaskInfoImpl {
-
-    public ReduceTTTaskInfo() {
-      super();
-    }
-
-    public ReduceTTTaskInfo(boolean slotTaken, boolean wasKilled,
-        ReduceTaskStatus status, Configuration conf, String user,
-        boolean isTaskCleanup, String pid) {
-      super(slotTaken, wasKilled, status, conf, user, isTaskCleanup, pid);
-    }
-
-    @Override
-    public TaskStatus getTaskStatus() {
-      return status;
-    }
-    
-    public void readFields(DataInput in) throws IOException {
-      super.readFields(in);
-      status = new ReduceTaskStatus();
-      status.readFields(in);
-    }
-  }
-
-}
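Note: the deleted TTTaskInfoImpl writes an empty string when pid is null, so a reader cannot distinguish "no pid" from an empty pid. A hedged alternative sketch (not the deleted code) uses an explicit presence flag instead:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Hypothetical helpers for serializing a possibly-null String with a presence flag,
// instead of the empty-string placeholder used for pid in the deleted class.
final class NullableUtf {
  static void write(DataOutput out, String value) throws IOException {
    out.writeBoolean(value != null);
    if (value != null) {
      out.writeUTF(value);
    }
  }

  static String read(DataInput in) throws IOException {
    return in.readBoolean() ? in.readUTF() : null;
  }
}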
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TaskInfoImpl.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TaskInfoImpl.java
deleted file mode 100644
index 6871203..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TaskInfoImpl.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.mapred.TaskStatus;
-import org.apache.hadoop.mapreduce.TaskID;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.test.system.TaskInfo;
-
-/**
- * Concrete class to expose out the task related information to the Clients from
- * the JobTracker. Look at {@link TaskInfo} for further details.
- */
-class TaskInfoImpl implements TaskInfo {
-
-  private double progress;
-  private TaskID taskID;
-  private int killedAttempts;
-  private int failedAttempts;
-  private int runningAttempts;
-  private TaskStatus[] taskStatus;
-  private boolean setupOrCleanup;
-  private String[] taskTrackers;
-
-  public TaskInfoImpl() {
-    taskID = new TaskID();
-  }
-
-  public TaskInfoImpl(
-      TaskID taskID, double progress, int runningAttempts, int killedAttempts,
-      int failedAttempts, TaskStatus[] taskStatus, boolean setupOrCleanup,
-      String[] taskTrackers) {
-    this.progress = progress;
-    this.taskID = taskID;
-    this.killedAttempts = killedAttempts;
-    this.failedAttempts = failedAttempts;
-    this.runningAttempts = runningAttempts;
-    if (taskStatus != null) {
-      this.taskStatus = taskStatus;
-    } else {
-      if (taskID.getTaskType() == TaskType.MAP) {
-        this.taskStatus = new MapTaskStatus[] {};
-      } else {
-        this.taskStatus = new ReduceTaskStatus[] {};
-      }
-    }
-    this.setupOrCleanup = setupOrCleanup;
-    this.taskTrackers = taskTrackers;
-  }
-
-  @Override
-  public double getProgress() {
-    return progress;
-  }
-
-  @Override
-  public TaskID getTaskID() {
-    return taskID;
-  }
-
-  @Override
-  public int numKilledAttempts() {
-    return killedAttempts;
-  }
-
-  @Override
-  public int numFailedAttempts() {
-    return failedAttempts;
-  }
-
-  @Override
-  public int numRunningAttempts() {
-    return runningAttempts;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    taskID.readFields(in);
-    progress = in.readDouble();
-    runningAttempts = in.readInt();
-    killedAttempts = in.readInt();
-    failedAttempts = in.readInt();
-    int size = in.readInt();
-    if (taskID.getTaskType() == TaskType.MAP) {
-      taskStatus = new MapTaskStatus[size];
-    } else {
-      taskStatus = new ReduceTaskStatus[size];
-    }
-    for (int i = 0; i < size; i++) {
-      if (taskID.getTaskType() == TaskType.MAP) {
-        taskStatus[i] = new MapTaskStatus();
-      } else {
-        taskStatus[i] = new ReduceTaskStatus();
-      }
-      taskStatus[i].readFields(in);
-      taskStatus[i].setTaskTracker(in.readUTF());
-    }
-    setupOrCleanup = in.readBoolean();
-    size = in.readInt();
-    taskTrackers = new String[size];
-    for (int i = 0; i < size; i++) {
-      taskTrackers[i] = in.readUTF();
-    }
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    taskID.write(out);
-    out.writeDouble(progress);
-    out.writeInt(runningAttempts);
-    out.writeInt(killedAttempts);
-    out.writeInt(failedAttempts);
-    out.writeInt(taskStatus.length);
-    for (TaskStatus t : taskStatus) {
-      t.write(out);
-      out.writeUTF(t.getTaskTracker());
-    }
-    out.writeBoolean(setupOrCleanup);
-    out.writeInt(taskTrackers.length);
-    for (String tt : taskTrackers) {
-      out.writeUTF(tt);
-    }
-  }
-
-  @Override
-  public TaskStatus[] getTaskStatus() {
-    return taskStatus;
-  }
-
-  @Override
-  public boolean isSetupOrCleanup() {
-    return setupOrCleanup;
-  }
-
-  @Override
-  public String[] getTaskTrackers() {
-    return taskTrackers;
-  }
-}
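Note: TaskInfoImpl picks the concrete element type to construct (MapTaskStatus or ReduceTaskStatus) from the TaskType of the already-read TaskID before deserializing each element. A stripped-down sketch of that discriminator-driven pattern, with purely illustrative types:

import java.io.DataInput;
import java.io.IOException;

// Illustrative only: the concrete subtype is chosen from a discriminator read earlier
// in the stream, then each element deserializes its own fields.
class DispatchReadExample {
  abstract static class Attempt {
    abstract void readFields(DataInput in) throws IOException;
  }

  static class MapAttempt extends Attempt {
    void readFields(DataInput in) throws IOException { /* map-specific fields */ }
  }

  static class ReduceAttempt extends Attempt {
    void readFields(DataInput in) throws IOException { /* reduce-specific fields */ }
  }

  static Attempt[] readAttempts(DataInput in, boolean isMap) throws IOException {
    Attempt[] attempts = new Attempt[in.readInt()];   // length prefix first
    for (int i = 0; i < attempts.length; i++) {
      attempts[i] = isMap ? new MapAttempt() : new ReduceAttempt();
      attempts[i].readFields(in);                     // subtype chosen before reading
    }
    return attempts;
  }
}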
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/FinishTaskControlAction.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/FinishTaskControlAction.java
deleted file mode 100644
index 64677b8..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/FinishTaskControlAction.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.test.system;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.TaskID;
-import org.apache.hadoop.test.system.ControlAction;
-
-/**
- * Control Action which signals a controlled task to proceed to completion. <br/>
- */
-public class FinishTaskControlAction extends ControlAction<TaskID> {
-
-  private static final String ENABLE_CONTROLLED_TASK_COMPLETION =
-      "test.system.enabled.task.completion.control";
-
-  /**
-   * Create a default control action. <br/>
-   * 
-   */
-  public FinishTaskControlAction() {
-    super(new TaskID());
-  }
-
-  /**
-   * Create a control action specific to a particular task. <br/>
-   * 
-   * @param id
-   *          of the task.
-   */
-  public FinishTaskControlAction(TaskID id) {
-    super(id);
-  }
-
-  /**
-   * Sets up the job to be controlled using the finish task control action. 
-   * <br/>
-   * 
-   * @param conf
-   *          configuration to be used submit the job.
-   */
-  public static void configureControlActionForJob(Configuration conf) {
-    conf.setBoolean(ENABLE_CONTROLLED_TASK_COMPLETION, true);
-  }
-  
-  /**
-   * Checks if the control action is enabled in the passed configuration. <br/>
-   * @param conf configuration
-   * @return true if action is enabled.
-   */
-  public static boolean isControlActionEnabled(Configuration conf) {
-    return conf.getBoolean(ENABLE_CONTROLLED_TASK_COMPLETION, false);
-  }
-}
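Note: FinishTaskControlAction enables its behaviour through a single boolean configuration key that defaults to off when absent. A small hedged sketch of the same pattern with an illustrative key name:

import org.apache.hadoop.conf.Configuration;

// Hypothetical config-gated switch; the key name is illustrative, not a real Hadoop key.
final class ControlFlagExample {
  static final String KEY = "test.system.example.control.enabled";

  static void enable(Configuration conf) {
    conf.setBoolean(KEY, true);
  }

  static boolean isEnabled(Configuration conf) {
    return conf.getBoolean(KEY, false);   // defaults to disabled when unset
  }
}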
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTClient.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTClient.java
deleted file mode 100644
index 8c9146c..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTClient.java
+++ /dev/null
@@ -1,348 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.test.system;
-
-import java.io.IOException;
-
-import junit.framework.Assert;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.JobTracker;
-import org.apache.hadoop.mapred.RunningJob;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-import org.apache.hadoop.mapred.TaskStatus;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapreduce.test.system.TaskInfo;
-import static org.junit.Assert.*;
-
-/**
- * JobTracker client for system tests.
- */
-public class JTClient extends MRDaemonClient<JTProtocol> {
-  static final Log LOG = LogFactory.getLog(JTClient.class);
-  private JobClient client;
-  private static final String HADOOP_JT_OPTS_ENV = "HADOOP_JOBTRACKER_OPTS";
-
-  /**
-   * Create JobTracker client to talk to {@link JobTracker} specified in the
-   * configuration. <br/>
-   * 
-   * @param conf
-   *          configuration used to create a client.
-   * @param daemon
-   *          the process management instance for the {@link JobTracker}
-   * @throws IOException
-   */
-  public JTClient(Configuration conf, RemoteProcess daemon) throws IOException {
-    super(conf, daemon);
-  }
-
-  @Override
-  public synchronized void connect() throws IOException {
-    if (isConnected()) {
-      return;
-    }
-    client = new JobClient(new JobConf(getConf()));
-    setConnected(true);
-  }
-
-  @Override
-  public synchronized void disconnect() throws IOException {
-    client.close();
-  }
-
-  @Override
-  public synchronized JTProtocol getProxy() {
-    return (JTProtocol) client.getProtocol();
-  }
-
-  /**
-   * Gets the {@link JobClient} which can be used for job submission. JobClient
-   * which is returned would not contain the decorated API's. To be used for
-   * submitting of the job.
-   * 
-   * @return client handle to the JobTracker
-   */
-  public JobClient getClient() {
-    return client;
-  }
-
-  /**
-   * Gets the configuration which the JobTracker is currently running.<br/>
-   * 
-   * @return configuration of JobTracker.
-   * 
-   * @throws IOException
-   */
-  public Configuration getJobTrackerConfig() throws IOException {
-    return getProxy().getDaemonConf();
-  }
-
-  /**
-   * Kills the job. <br/>
-   * 
-   * @param id
-   *          of the job to be killed.
-   * @throws IOException
-   */
-  public void killJob(JobID id) throws IOException {
-    try {
-      getClient().killJob(id);
-    } catch (InterruptedException e) {
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   * Verification API to check running jobs and running job states. users have
-   * to ensure that their jobs remain running state while verification is
-   * called. <br/>
-   * 
-   * @param jobId
-   *          of the job to be verified.
-   * 
-   * @throws Exception
-   */
-  public void verifyRunningJob(JobID jobId) throws Exception {
-  }
-
-  private JobInfo getJobInfo(JobID jobId) throws IOException {
-    JobInfo info = getProxy().getJobInfo(jobId);
-    if (info == null && !getProxy().isJobRetired(jobId)) {
-      Assert.fail("Job id : " + jobId + " has never been submitted to JT");
-    }
-    return info;
-  }
-
-  /**
-   * Verification API to wait till job retires and verify all the retired state
-   * is correct. <br/>
-   * 
-   * @param job
-   *          of the job used for completion
-   * @return job handle
-   * @throws Exception
-   */
-  public Job submitAndVerifyJob(Job job) throws Exception {
-    job.submit();
-    JobID jobId = job.getJobID();
-    verifyRunningJob(jobId);
-    verifyCompletedJob(jobId);
-    return job;
-  }
-
-  /**
-   * Verification API to check if the job completion state is correct. <br/>
-   * 
-   * @param id
-   *          id of the job to be verified.
-   */
-
-  public void verifyCompletedJob(JobID id) throws Exception {
-    RunningJob rJob =
-        getClient().getJob(org.apache.hadoop.mapred.JobID.downgrade(id));
-    while (!rJob.isComplete()) {
-      LOG.info("waiting for job :" + id + " to retire");
-      Thread.sleep(1000);
-      rJob = getClient().getJob(org.apache.hadoop.mapred.JobID.downgrade(id));
-    }
-    verifyJobDetails(id);
-    JobInfo jobInfo = getJobInfo(id);
-    if (jobInfo != null) {
-      while (!jobInfo.isHistoryFileCopied()) {
-        Thread.sleep(1000);
-        LOG.info(id + " waiting for history file to be copied");
-        jobInfo = getJobInfo(id);
-        if (jobInfo == null) {
-          break;
-        }
-      }
-    }
-    verifyJobHistory(id);
-  }
-
-  /**
-   * Verification API to check if the job details are semantically correct.<br/>
-   * 
-   * @param jobId
-   *          jobID of the job
-   * @return true if all the job verifications are verified to be true
-   * @throws Exception
-   */
-  public void verifyJobDetails(JobID jobId) throws Exception {
-    // wait till the setup is launched and finished.
-    JobInfo jobInfo = getJobInfo(jobId);
-    if (jobInfo == null) {
-      return;
-    }
-    LOG.info("waiting for the setup to be finished");
-    while (!jobInfo.isSetupFinished()) {
-      Thread.sleep(2000);
-      jobInfo = getJobInfo(jobId);
-      if (jobInfo == null) {
-        break;
-      }
-    }
-    // verify job id.
-    assertTrue(jobId.toString().startsWith("job_"));
-    LOG.info("verified job id and is : " + jobId.toString());
-    // verify the number of map/reduce tasks.
-    verifyNumTasks(jobId);
-    // should verify job progress.
-    verifyJobProgress(jobId);
-    jobInfo = getJobInfo(jobId);
-    if (jobInfo == null) {
-      return;
-    }
-    if (jobInfo.getStatus().getRunState() == JobStatus.SUCCEEDED) {
-      // verify if map/reduce progress reached 1.
-      jobInfo = getJobInfo(jobId);
-      if (jobInfo == null) {
-        return;
-      }
-      assertEquals(1.0, jobInfo.getStatus().mapProgress(), 0.001);
-      assertEquals(1.0, jobInfo.getStatus().reduceProgress(), 0.001);
-      // verify successful finish of tasks.
-      verifyAllTasksSuccess(jobId);
-    }
-    if (jobInfo.getStatus().isJobComplete()) {
-      // verify if the cleanup is launched.
-      jobInfo = getJobInfo(jobId);
-      if (jobInfo == null) {
-        return;
-      }
-      assertTrue(jobInfo.isCleanupLaunched());
-      LOG.info("Verified launching of cleanup");
-    }
-  }
-
-  public void verifyAllTasksSuccess(JobID jobId) throws IOException {
-    JobInfo jobInfo = getJobInfo(jobId);
-    if (jobInfo == null) {
-      return;
-    }
-
-    TaskInfo[] taskInfos = getProxy().getTaskInfo(jobId);
-
-    if (taskInfos.length == 0 && getProxy().isJobRetired(jobId)) {
-      LOG.info("Job has been retired from JT memory : " + jobId);
-      return;
-    }
-
-    for (TaskInfo taskInfo : taskInfos) {
-      TaskStatus[] taskStatus = taskInfo.getTaskStatus();
-      if (taskStatus != null && taskStatus.length > 0) {
-        int i;
-        for (i = 0; i < taskStatus.length; i++) {
-          if (TaskStatus.State.SUCCEEDED.equals(taskStatus[i].getRunState())) {
-            break;
-          }
-        }
-        assertFalse(i == taskStatus.length);
-      }
-    }
-    LOG.info("verified that none of the tasks failed.");
-  }
-
-  public void verifyJobProgress(JobID jobId) throws IOException {
-    JobInfo jobInfo;
-    jobInfo = getJobInfo(jobId);
-    if (jobInfo == null) {
-      return;
-    }
-    assertTrue(jobInfo.getStatus().mapProgress() >= 0
-        && jobInfo.getStatus().mapProgress() <= 1);
-    LOG.info("verified map progress and is "
-        + jobInfo.getStatus().mapProgress());
-    assertTrue(jobInfo.getStatus().reduceProgress() >= 0
-        && jobInfo.getStatus().reduceProgress() <= 1);
-    LOG.info("verified reduce progress and is "
-        + jobInfo.getStatus().reduceProgress());
-  }
-
-  public void verifyNumTasks(JobID jobId) throws IOException {
-    JobInfo jobInfo;
-    jobInfo = getJobInfo(jobId);
-    if (jobInfo == null) {
-      return;
-    }
-    assertEquals(jobInfo.numMaps(), (jobInfo.runningMaps()
-        + jobInfo.waitingMaps() + jobInfo.finishedMaps()));
-    LOG.info("verified number of map tasks and is " + jobInfo.numMaps());
-
-    assertEquals(jobInfo.numReduces(), (jobInfo.runningReduces()
-        + jobInfo.waitingReduces() + jobInfo.finishedReduces()));
-    LOG.info("verified number of reduce tasks and is " + jobInfo.numReduces());
-  }
-
-  /**
-   * Verification API to check if the job history file is semantically correct. <br/>
-   * 
-   * 
-   * @param jobId
-   *          of the job to be verified.
-   * @throws IOException
-   */
-  public void verifyJobHistory(JobID jobId) throws IOException {
-    JobInfo info = getJobInfo(jobId);
-    String url = "";
-    if (info == null) {
-      LOG.info("Job has been retired from JT memory : " + jobId);
-      url = getProxy().getJobHistoryLocationForRetiredJob(jobId);
-    } else {
-      url = info.getHistoryUrl();
-    }
-    Path p = new Path(url);
-    if (p.toUri().getScheme().equals("file:/")) {
-      FileStatus st = getFileStatus(url, true);
-      Assert.assertNotNull("Job History file for "
-          + jobId + " not present " + "when job is completed", st);
-    } else {
-      FileStatus st = getFileStatus(url, false);
-      Assert.assertNotNull("Job History file for "
-          + jobId + " not present " + "when job is completed", st);
-    }
-    LOG.info("Verified the job history for the jobId : " + jobId);
-  }
-
-  @Override
-  public String getHadoopOptsEnvName() {
-    return HADOOP_JT_OPTS_ENV;
-  }
-
-  /**
-   * Concrete implementation of abstract super class method
-   *
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the given attribute
-   * @throws IOException is thrown in case of communication errors
-   */
-  @Override
-  public Object getDaemonAttribute(String attributeName) throws IOException {
-    return getJmxAttribute("JobTracker", "JobTrackerInfo", attributeName);
-  }
-}
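Note: JTClient's verification helpers poll JobTracker state in unbounded sleep loops (for example while waiting for the job to complete or for the history file to be copied). A hedged sketch of the same wait bounded by a deadline, so a hung job fails fast; the names and timeout are assumptions:

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

// Illustrative bounded wait; the deleted JTClient loops without a deadline.
class JobWaitExample {
  static void waitForCompletion(JobClient client, JobID id, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    RunningJob job = client.getJob(id);
    while (job != null && !job.isComplete()) {
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Timed out waiting for job " + id + " to complete");
      }
      Thread.sleep(1000);
      job = client.getJob(id);            // re-fetch so the status is refreshed
    }
  }
}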
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java
deleted file mode 100644
index 4e0f3c8..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.test.system;
-
-import java.io.IOException;
-
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.TaskID;
-import org.apache.hadoop.test.system.DaemonProtocol;
-
-/**
- * Client side API's exposed from JobTracker.
- */
-public interface JTProtocol extends DaemonProtocol {
-  long versionID = 1L;
-
-  /**
-   * Get the information pertaining to given job.<br/>
-   * The returned JobInfo object can be null when the
-   * specified job by the job id is retired from the 
-   * JobTracker memory which happens after job is 
-   * completed. <br/>
-   * 
-   * @param id
-   *          of the job for which information is required.
-   * @return information regarding the job, or null if the job is
-   *         retired from JobTracker memory.
-   * @throws IOException
-   */
-  public JobInfo getJobInfo(JobID jobID) throws IOException;
-
-  /**
-   * Gets the information pertaining to a task. <br/>
-   * The returned TaskInfo object can be null when the 
-   * specified task specified by the task id is retired
-   * from the JobTracker memory which happens after the
-   * job is completed. <br/>
-   * @param id
-   *          of the task for which information is required.
-   * @return information regarding the task, or null if the
-   *          task is retired from JobTracker memory.
-   * @throws IOException
-   */
-  public TaskInfo getTaskInfo(TaskID taskID) throws IOException;
-
-  /**
-   * Gets the information pertaining to a given TaskTracker. <br/>
-   * The returned TTInfo class can be null if the given TaskTracker
-   * information is removed from JobTracker memory which is done
-   * when the TaskTracker is marked lost by the JobTracker. <br/>
-   * @param name
-   *          of the tracker.
-   * @return information regarding the tracker null if the TaskTracker
-   *          is marked lost by the JobTracker.
-   * @throws IOException
-   */
-  public TTInfo getTTInfo(String trackerName) throws IOException;
-
-  /**
-   * Gets a list of all available jobs with JobTracker.<br/>
-   * 
-   * @return list of all jobs.
-   * @throws IOException
-   */
-  public JobInfo[] getAllJobInfo() throws IOException;
-
-  /**
-   * Gets a list of tasks pertaining to a job. <br/>
-   * 
-   * @param id
-   *          of the job.
-   * 
-   * @return list of all tasks for the job.
-   * @throws IOException
-   */
-  public TaskInfo[] getTaskInfo(JobID jobID) throws IOException;
-
-  /**
-   * Gets a list of TaskTrackers which have reported to the JobTracker. <br/>
-   * 
-   * @return list of all TaskTracker.
-   * @throws IOException
-   */
-  public TTInfo[] getAllTTInfo() throws IOException;
-
-  /**
-   * Checks if a given job is retired from the JobTrackers Memory. <br/>
-   * 
-   * @param id
-   *          of the job
-   * @return true if job is retired.
-   * @throws IOException
-   */
-  boolean isJobRetired(JobID jobID) throws IOException;
-
-  /**
-   * Gets the location of the history file for a retired job. <br/>
-   * 
-   * @param id
-   *          of the job
-   * @return location of history file
-   * @throws IOException
-   */
-  String getJobHistoryLocationForRetiredJob(JobID jobID) throws IOException;
-}
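Note: JTProtocol pairs a versionID constant with the methods exposed to test clients; the client side passes the same constant to RPC.getProxy so versions are checked when the connection is made. A minimal, purely illustrative declaration of such a protocol:

import java.io.IOException;

import org.apache.hadoop.test.system.DaemonProtocol;

// Hypothetical test protocol for illustration only; real protocols in this tree
// follow the same shape: a versionID constant plus the remote methods.
public interface ExampleTestProtocol extends DaemonProtocol {
  long versionID = 1L;

  /** A sample remote call; the method name is an assumption. */
  String getDaemonBuildVersion() throws IOException;
}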
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JobInfo.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JobInfo.java
deleted file mode 100644
index b5f2f92..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JobInfo.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.test.system;
-
-import java.util.List;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapreduce.JobID;
-
-/**
- * Job state information as seen by the JobTracker.
- */
-public interface JobInfo extends Writable {
-  /**
-   * Gets the JobId of the job.<br/>
-   * 
-   * @return id of the job.
-   */
-  JobID getID();
-
-  /**
-   * Gets the current status of the job.<br/>
-   * 
-   * @return status.
-   */
-  JobStatus getStatus();
-
-  /**
-   * Gets the history location of the job.<br/>
-   * 
-   * @return the path to the history file.
-   */
-  String getHistoryUrl();
-
-  /**
-   * Gets the number of maps which are currently running for the job. <br/>
-   * 
-   * @return number of running maps for the job.
-   */
-  int runningMaps();
-
-  /**
-   * Gets the number of reduces currently running for the job. <br/>
-   * 
-   * @return number of reduces running for the job.
-   */
-  int runningReduces();
-
-  /**
-   * Gets the number of maps to be scheduled for the job. <br/>
-   * 
-   * @return number of waiting maps.
-   */
-  int waitingMaps();
-
-  /**
-   * Gets the number of reduces to be scheduled for the job. <br/>
-   * 
-   * @return number of waiting reduces.
-   */
-  int waitingReduces();
-  
-  /**
-   * Gets the number of maps that are finished. <br/>
-   * @return the number of finished maps.
-   */
-  int finishedMaps();
-  
-  /**
-   * Gets the number of map tasks that are to be spawned for the job <br/>
-   * @return
-   */
-  int numMaps();
-  
-  /**
-   * Gets the number of reduce tasks that are to be spawned for the job <br/>
-   * @return
-   */
-  int numReduces();
-  
-  /**
-   * Gets the number of reduces that are finished. <br/>
-   * @return the number of finished reduces.
-   */
-  int finishedReduces();
-
-  /**
-   * Gets if cleanup for the job has been launched.<br/>
-   * 
-   * @return true if cleanup task has been launched.
-   */
-  boolean isCleanupLaunched();
-
-  /**
-   * Gets if the setup for the job has been launched.<br/>
-   * 
-   * @return true if setup task has been launched.
-   */
-  boolean isSetupLaunched();
-
-  /**
-   * Gets if the setup for the job has been completed.<br/>
-   * 
-   * @return true if the setup task for the job has completed.
-   */
-  boolean isSetupFinished();
-
-  /**
-   * Gets list of blacklisted trackers for the particular job. <br/>
-   * 
-   * @return list of blacklisted tracker name.
-   */
-  List<String> getBlackListedTrackers();
-  
-  /**
-   * Gets if the history file of the job is copied to the done 
-   * location <br/>
-   * 
-   * @return true if history file copied.
-   */
-  boolean isHistoryFileCopied();
-}
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java
deleted file mode 100644
index fc460cd..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.test.system;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.test.system.AbstractDaemonClient;
-import org.apache.hadoop.test.system.AbstractDaemonCluster;
-import org.apache.hadoop.test.system.process.ClusterProcessManager;
-import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;
-import org.apache.hadoop.test.system.process.MultiUserHadoopDaemonRemoteCluster;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
-
-/**
- * Concrete AbstractDaemonCluster representing a Map-Reduce cluster.
- * 
- */
-@SuppressWarnings("unchecked")
-public class MRCluster extends AbstractDaemonCluster {
-
-  private static final Log LOG = LogFactory.getLog(MRCluster.class);
-  public static final String CLUSTER_PROCESS_MGR_IMPL = 
-    "test.system.mr.clusterprocess.impl.class";
-
-  /**
-   * Key used to point to the file containing the hostnames of the tasktrackers
-   */
-  public static final String CONF_HADOOP_TT_HOSTFILE_NAME =
-    "test.system.hdrc.tt.hostfile";
-
-  private static List<HadoopDaemonInfo> mrDaemonInfos = 
-    new ArrayList<HadoopDaemonInfo>();
-  private static String TT_hostFileName;
-  private static String jtHostName;
-  private static final String SYSTEM_TEST_FILE = "system-test.xml";
-
-  protected enum Role {JT, TT};
-
-  static{
-    Configuration.addDefaultResource("mapred-default.xml");
-    Configuration.addDefaultResource("mapred-site.xml");
-  }
-
-  private MRCluster(Configuration conf, ClusterProcessManager rCluster)
-      throws IOException {
-    super(conf, rCluster);
-  }
-
-  /**
-   * Factory method to create an instance of the Map-Reduce cluster.<br/>
-   * 
-   * @param conf
-   *          contains all required parameter to create cluster.
-   * @return a cluster instance to be managed.
-   * @throws Exception
-   */
-  public static MRCluster createCluster(Configuration conf) 
-      throws Exception {
-    conf.addResource(SYSTEM_TEST_FILE);
-    TT_hostFileName = conf.get(CONF_HADOOP_TT_HOSTFILE_NAME, "slaves");
-    String jtHostPort = conf.get(JTConfig.JT_IPC_ADDRESS);
-    if (jtHostPort == null) {
-      throw new Exception(JTConfig.JT_IPC_ADDRESS + "is not set or "
-        + SYSTEM_TEST_FILE + " hasn't been found.");
-    }
-    jtHostName = jtHostPort.trim().split(":")[0];
-    
-    mrDaemonInfos.add(new HadoopDaemonInfo("jobtracker", 
-        Role.JT, Arrays.asList(new String[]{jtHostName})));
-    mrDaemonInfos.add(new HadoopDaemonInfo("tasktracker", 
-        Role.TT, TT_hostFileName));
-    
-    String implKlass = conf.get(CLUSTER_PROCESS_MGR_IMPL);
-    if (implKlass == null || implKlass.isEmpty()) {
-      implKlass = MRProcessManager.class.getName();
-    }
-    Class<ClusterProcessManager> klass = (Class<ClusterProcessManager>) Class
-      .forName(implKlass);
-    ClusterProcessManager clusterProcessMgr = klass.newInstance();
-    LOG.info("Created ClusterProcessManager as " + implKlass);
-    clusterProcessMgr.init(conf);
-    return new MRCluster(conf, clusterProcessMgr);
-  }
-
-  protected JTClient createJTClient(RemoteProcess jtDaemon)
-      throws IOException {
-    return new JTClient(getConf(), jtDaemon);
-  }
-
-  protected TTClient createTTClient(RemoteProcess ttDaemon) 
-      throws IOException {
-    return new TTClient(getConf(), ttDaemon);
-  }
-
-  public JTClient getJTClient() {
-    Iterator<AbstractDaemonClient> it = getDaemons().get(Role.JT).iterator();
-    return (JTClient) it.next();
-  }
-
-  public List<TTClient> getTTClients() {
-    return (List) getDaemons().get(Role.TT);
-  }
-
-  public TTClient getTTClient(String hostname) {
-    for (TTClient c : getTTClients()) {
-      if (c.getHostName().equals(hostname)) {
-        return c;
-      }
-    }
-    return null;
-  }
-
-  @Override
-  public void ensureClean() throws IOException {
-    //TODO: ensure that no jobs/tasks are running
-    //restart the cluster if cleanup fails
-    JTClient jtClient = getJTClient();
-    JobInfo[] jobs = jtClient.getProxy().getAllJobInfo();
-    for(JobInfo job : jobs) {
-      jtClient.killJob(
-          org.apache.hadoop.mapred.JobID.downgrade(job.getID()));
-    }
-  }
-
-  @Override
-  protected AbstractDaemonClient createClient(
-      RemoteProcess process) throws IOException {
-    if (Role.JT.equals(process.getRole())) {
-      return createJTClient(process);
-    } else if (Role.TT.equals(process.getRole())) {
-      return createTTClient(process);
-    } else throw new IOException("Role: "+ process.getRole() + "  is not " +
-      "applicable to MRCluster");
-  }
-
-  public static class MRProcessManager extends HadoopDaemonRemoteCluster{
-    public MRProcessManager() {
-      super(mrDaemonInfos);
-    }
-  }
-
-  public static class MultiMRProcessManager
-      extends MultiUserHadoopDaemonRemoteCluster {
-    public MultiMRProcessManager() {
-      super(mrDaemonInfos);
-    }
-  }
-}
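Note: MRCluster resolves its ClusterProcessManager implementation from the CLUSTER_PROCESS_MGR_IMPL key with Class.forName and newInstance. A hedged sketch of an equivalent lookup using Configuration.getClass and ReflectionUtils, with illustrative key and type names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

// Hypothetical pluggable-implementation lookup; the key and type names are illustrative.
class PluggableFactoryExample {
  interface ProcessManager {
    void init(Configuration conf);
  }

  static class DefaultProcessManager implements ProcessManager {
    public void init(Configuration conf) { /* no-op default */ }
  }

  static ProcessManager create(Configuration conf) {
    Class<? extends ProcessManager> klass = conf.getClass(
        "test.system.example.processmanager.class",     // illustrative key
        DefaultProcessManager.class, ProcessManager.class);
    ProcessManager manager = ReflectionUtils.newInstance(klass, conf);
    manager.init(conf);
    return manager;
  }
}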
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRDaemonClient.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRDaemonClient.java
deleted file mode 100644
index c1166d3..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRDaemonClient.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.test.system;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.test.system.AbstractDaemonClient;
-import org.apache.hadoop.test.system.DaemonProtocol;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-/**
- * Base class for JobTracker and TaskTracker clients.
- */
-public abstract class MRDaemonClient<PROXY extends DaemonProtocol> 
-    extends AbstractDaemonClient<PROXY>{
-
-  public MRDaemonClient(Configuration conf, RemoteProcess process)
-      throws IOException {
-    super(conf, process);
-  }
-
-  public String[] getMapredLocalDirs() throws IOException {
-    return getProxy().getDaemonConf().getStrings(MRConfig.LOCAL_DIR);
-  }
-
-  public String getLogDir() throws IOException {
-    return getProcessInfo().getSystemProperties().get("hadoop.log.dir");
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTClient.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTClient.java
deleted file mode 100644
index 5303309..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTClient.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.test.system;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.mapred.JobTracker;
-import org.apache.hadoop.mapred.TaskTrackerStatus;
-import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-/**
- * TaskTracker client for system tests. The class assumes that the
- * configuration key {@code TTConfig.TT_REPORT_ADDRESS} is set;
- * only the port portion of the configured address is used when
- * connecting to the daemon.
- */
-public class TTClient extends MRDaemonClient<TTProtocol> {
-
-  TTProtocol proxy;
-  private static final String SYSTEM_TEST_FILE = "system-test.xml";
-  private static final String HADOOP_TT_OPTS_ENV = "HADOOP_TASKTRACKER_OPTS";
-
-  public TTClient(Configuration conf, RemoteProcess daemon) 
-      throws IOException {
-    super(conf, daemon);
-  }
-
-  @Override
-  public synchronized void connect() throws IOException {
-    if (isConnected()) {
-      return;
-    }
-    String sockAddrStr = getConf().get(TTConfig.TT_REPORT_ADDRESS);
-    if (sockAddrStr == null) {
-      throw new IllegalArgumentException(
-          "TaskTracker report address is not set");
-    }
-    String[] splits = sockAddrStr.split(":");
-    if (splits.length != 2) {
-      throw new IllegalArgumentException(TTConfig.TT_REPORT_ADDRESS
-        + " is not correctly configured or "
-        + SYSTEM_TEST_FILE + " hasn't been found.");
-    }
-    String port = splits[1];
-    String sockAddr = getHostName() + ":" + port;
-    InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr);
-    proxy = (TTProtocol) RPC.getProxy(TTProtocol.class, TTProtocol.versionID,
-        bindAddr, getConf());
-    setConnected(true);
-  }
-
-  @Override
-  public synchronized void disconnect() throws IOException {
-    RPC.stopProxy(proxy);
-  }
-
-  @Override
-  public synchronized TTProtocol getProxy() {
-    return proxy;
-  }
-
-  /**
-   * Gets the last sent status to the {@link JobTracker}. <br/>
-   * 
-   * @return the task tracker status.
-   * @throws IOException
-   */
-  public TaskTrackerStatus getStatus() throws IOException {
-    return getProxy().getStatus();
-  }
-
-  @Override
-  public String getHadoopOptsEnvName() {
-    return HADOOP_TT_OPTS_ENV;
-  }
-
-  /**
-   * Concrete implementation of abstract super class method
-   *
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the given attribute
-   * @throws IOException is thrown in case of communication errors
-   */
-  @Override
-  public Object getDaemonAttribute(String attributeName) throws IOException {
-    return getJmxAttribute("TaskTracker", "TaskTrackerInfo", attributeName);
-  }
-}
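Note: TTClient builds its RPC proxy from the port portion of TTConfig.TT_REPORT_ADDRESS and tears it down with RPC.stopProxy. A hedged sketch of that lifecycle for a one-off call, reusing the hypothetical ExampleTestProtocol sketched earlier:

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;

// Illustrative proxy lifecycle; ExampleTestProtocol is the hypothetical protocol above.
class ProxyLifecycleExample {
  static String fetchBuildVersion(Configuration conf, String host, int port) throws IOException {
    InetSocketAddress addr = NetUtils.createSocketAddr(host + ":" + port);
    ExampleTestProtocol proxy = (ExampleTestProtocol) RPC.getProxy(
        ExampleTestProtocol.class, ExampleTestProtocol.versionID, addr, conf);
    try {
      return proxy.getDaemonBuildVersion();   // any call over the established connection
    } finally {
      RPC.stopProxy(proxy);                   // always release the connection
    }
  }
}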
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTInfo.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTInfo.java
deleted file mode 100644
index 23c9459..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTInfo.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.test.system;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.TaskTracker;
-import org.apache.hadoop.mapred.TaskTrackerStatus;
-
-/**
- * TaskTracker state information as seen by the JobTracker.
- */
-public interface TTInfo extends Writable {
-  /**
-   * Gets the {@link TaskTracker} name.<br/>
-   * 
-   * @return name of the tracker.
-   */
-  String getName();
-
-  /**
-   * Gets the current status of the {@link TaskTracker} <br/>
-   * 
-   * @return status of the {@link TaskTracker}
-   */
-  TaskTrackerStatus getStatus();
-}
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java
deleted file mode 100644
index 58dce3a..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.test.system;
-
-import org.apache.hadoop.mapred.JobTracker;
-import org.apache.hadoop.mapred.TaskTracker;
-import org.apache.hadoop.mapred.TaskTrackerStatus;
-import org.apache.hadoop.mapreduce.TaskID;
-import org.apache.hadoop.mapreduce.security.token.JobTokenSelector;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.security.token.TokenInfo;
-import org.apache.hadoop.test.system.DaemonProtocol;
-
-import java.io.IOException;
-
-/**
- * TaskTracker RPC interface to be used for cluster tests.
- *
- * The protocol has to be annotated so KerberosInfo can be filled in during
- * creation of a ipc.Client connection
- */
-@KerberosInfo(
-    serverPrincipal = TaskTracker.TT_USER_NAME)
-@TokenInfo(JobTokenSelector.class)
-public interface TTProtocol extends DaemonProtocol {
-
-  public static final long versionID = 1L;
-  /**
-   * Gets latest status which was sent in heartbeat to the {@link JobTracker}. 
-   * <br/>
-   * 
-   * @return status of the TaskTracker daemon
-   * @throws IOException in case of errors
-   */
-  TaskTrackerStatus getStatus() throws IOException;
-
-  /**
-   * Gets list of all the tasks in the {@link TaskTracker}.<br/>
-   * 
-   * @return list of all the tasks
-   * @throws IOException in case of errors
-   */
-  TTTaskInfo[] getTasks() throws IOException;
-
-  /**
-   * Gets the task associated with the id.<br/>
-   * 
-   * @param taskID of the task.
-   * 
-   * @return returns task info <code>TTTaskInfo</code>
-   * @throws IOException in case of errors
-   */
-  TTTaskInfo getTask(TaskID taskID) throws IOException;
-
-  /**
-   * Checks if any of process in the process tree of the task is alive
-   * or not. <br/>
-   * 
-   * @param pid
-   *          of the task attempt
-   * @return true if task process tree is alive.
-   * @throws IOException in case of errors
-   */
-  boolean isProcessTreeAlive(String pid) throws IOException;
-}
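Note: TTProtocol carries KerberosInfo and TokenInfo annotations so the ipc.Client knows which server principal (looked up through the named configuration key) and which token selector apply when a connection is created. An illustrative, hedged declaration of the same shape:

import org.apache.hadoop.mapreduce.security.token.JobTokenSelector;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.test.system.DaemonProtocol;

// Hypothetical secured protocol; the principal key below is an illustrative name,
// not a real Hadoop configuration key.
@KerberosInfo(serverPrincipal = "test.system.example.kerberos.principal")
@TokenInfo(JobTokenSelector.class)
interface SecuredExampleProtocol extends DaemonProtocol {
  long versionID = 1L;
}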
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java
deleted file mode 100644
index f03173a..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.test.system;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.TaskStatus;
-import org.apache.hadoop.mapred.TaskTracker;
-
-/**
- * Task state information as seen by the TT.
- */
-public interface TTTaskInfo extends Writable {
-
-  /**
-   * Has task occupied a slot? A task occupies a slot once it starts localizing
-   * on the {@link TaskTracker} <br/>
-   * 
-   * @return true if task has started occupying a slot.
-   */
-  boolean slotTaken();
-
-  /**
-   * Has the task been killed? <br/>
-   * 
-   * @return true, if task has been killed.
-   */
-  boolean wasKilled();
-
-  /**
-   * Gets the task status associated with the particular task trackers task 
-   * view.<br/>
-   * 
-   * @return status of the particular task
-   */
-  TaskStatus getTaskStatus();
-  
-  /**
-   * Gets the configuration object of the task.
-   * @return
-   */
-  Configuration getConf();
-  
-  /**
-   * Gets the user of the task.
-   * @return
-   */
-  String getUser();
-  
-  /**
-   * Provides information as to whether the task is a cleanup of task.
-   * @return true if it is a clean up of task.
-   */
-  boolean isTaskCleanupTask();
-
-  /**
-   * Gets the pid of the running task on the task-tracker.
-   * 
-   * @return pid of the task.
-   */
-  String getPid();
-}
diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TaskInfo.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TaskInfo.java
deleted file mode 100644
index 738b596..0000000
--- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TaskInfo.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce.test.system;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.JobTracker;
-import org.apache.hadoop.mapred.TaskStatus;
-import org.apache.hadoop.mapreduce.TaskID;
-
-/**
- * Task state information of a TaskInProgress as seen by the {@link JobTracker}
- */
-public interface TaskInfo extends Writable {
-  /**
-   * Gets the task id of the TaskInProgress.
-   * 
-   * @return id of the task.
-   */
-  TaskID getTaskID();
-
-  /**
-   * Number of times task attempts have failed for the given TaskInProgress.
-   * <br/>
-   * 
-   * @return number of failed task attempts.
-   */
-  int numFailedAttempts();
-
-  /**
-   * Number of times task attempts have been killed for the given TaskInProgress 
-   * <br/>
-   * 
-   * @return number of killed task attempts.
-   */
-  int numKilledAttempts();
-
-  /**
-   * Gets the progress of the task as a fraction in the range 0.0-1.0.
-   * <br/>
-   * 
-   * @return progress of the task as a value between 0.0 and 1.0.
-   */
-  double getProgress();
-
-  /**
-   * Number of attempts currently running for the given TaskInProgress.<br/>
-   * 
-   * @return number of running attempts.
-   */
-  int numRunningAttempts();
-
-  /**
-   * Array of TaskStatus objects that are related to the corresponding
-   * TaskInProgress object. The task status of the tip is only populated
-   * once a tracker reports back the task status.<br/>
-   * 
-   * @return list of task statuses.
-   */
-  TaskStatus[] getTaskStatus();
-
-  /**
-   * Gets a list of trackers on which the task attempts are scheduled/running.
-   * Can be empty if the task attempt has succeeded <br/>
-   * 
-   * @return list of trackers
-   */
-  String[] getTaskTrackers();
-
-  /**
-   * Gets if the current TaskInProgress is a setup or cleanup tip. <br/>
-   * 
-   * @return true if setup/cleanup
-   */
-  boolean isSetupOrCleanup();
-}
diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestCluster.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestCluster.java
deleted file mode 100644
index 413e3bb..0000000
--- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestCluster.java
+++ /dev/null
@@ -1,325 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.security.PrivilegedExceptionAction;
-import java.util.Collection;
-
-import org.junit.Assert;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.SleepJob;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.mapred.TaskStatus.State;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.TaskID;
-import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
-import org.apache.hadoop.mapreduce.test.system.JTClient;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.mapreduce.test.system.TTClient;
-import org.apache.hadoop.mapreduce.test.system.TTInfo;
-import org.apache.hadoop.mapreduce.test.system.TTTaskInfo;
-import org.apache.hadoop.mapreduce.test.system.TaskInfo;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.system.AbstractDaemonClient;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestCluster {
-
-  private static final Log LOG = LogFactory.getLog(TestCluster.class);
-
-  private static MRCluster cluster;
-
-  public TestCluster() throws Exception {
-
-  }
-
-  @BeforeClass
-  public static void before() throws Exception {
-    String [] expExcludeList = new String[2];
-    expExcludeList[0] = "java.net.ConnectException";
-    expExcludeList[1] = "java.io.IOException";
-    cluster = MRCluster.createCluster(new Configuration());
-    cluster.setExcludeExpList(expExcludeList);
-    cluster.setUp();
-  }
-
-  @AfterClass
-  public static void after() throws Exception {
-    cluster.tearDown();
-  }
-
-  @Test
-  public void testProcessInfo() throws Exception {
-    LOG.info("Process info of JobTracker is : "
-        + cluster.getJTClient().getProcessInfo());
-    Assert.assertNotNull(cluster.getJTClient().getProcessInfo());
-    Collection<TTClient> tts = cluster.getTTClients();
-    for (TTClient tt : tts) {
-      LOG.info("Process info of TaskTracker is : " + tt.getProcessInfo());
-      Assert.assertNotNull(tt.getProcessInfo());
-    }
-  }
-
-  @Test
-  public void testJobSubmission() throws Exception {
-    Configuration conf = new Configuration(cluster.getConf());
-    SleepJob job = new SleepJob();
-    job.setConf(conf);
-    Job rJob = job.createJob(1, 1, 100, 100, 100, 100);
-    rJob = cluster.getJTClient().submitAndVerifyJob(rJob);
-    cluster.getJTClient().verifyJobHistory(rJob.getJobID());
-  }
-
-  // @Test
-  public void testFileStatus() throws Exception {
-    UserGroupInformation ugi =
-        UserGroupInformation.createRemoteUser(cluster
-            .getJTClient().getProxy().getDaemonUser());
-    ugi.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        MRCluster myCluster = null;
-        try {
-          myCluster = MRCluster.createCluster(cluster.getConf());
-          myCluster.connect();
-          JTClient jt = myCluster.getJTClient();
-          String dir = ".";
-          checkFileStatus(jt.getFileStatus(dir, true));
-          checkFileStatus(jt.listStatus(dir, false, true), dir);
-          for (TTClient tt : myCluster.getTTClients()) {
-            String[] localDirs = tt.getMapredLocalDirs();
-            for (String localDir : localDirs) {
-              checkFileStatus(tt.listStatus(localDir, true, false), localDir);
-              checkFileStatus(tt.listStatus(localDir, true, true), localDir);
-            }
-          }
-          String systemDir = jt.getClient().getSystemDir().toString();
-          checkFileStatus(jt.listStatus(systemDir, false, true), systemDir);
-          checkFileStatus(jt.listStatus(jt.getLogDir(), true, true), jt
-              .getLogDir());
-        } finally {
-          if (myCluster != null) {
-            myCluster.disconnect();
-          }
-        }
-        return null;
-      }
-    });
-  }
-
-  private void checkFileStatus(FileStatus[] fs, String path) {
-    Assert.assertNotNull(fs);
-    LOG.info("-----Listing for " + path + "  " + fs.length);
-    for (FileStatus fz : fs) {
-      checkFileStatus(fz);
-    }
-  }
-
-  private void checkFileStatus(FileStatus fz) {
-    Assert.assertNotNull(fz);
-    LOG.info("FileStatus is "
-        + fz.getPath() + "  " + fz.getPermission() + "  " + fz.getOwner()
-        + "  " + fz.getGroup() + "  " + fz.getClass());
-  }
-
-  /**
-   * Test to verify the common properties of tasks.
-   * 
-   * @throws Exception
-   */
-  @Test
-  public void testTaskDetails() throws Exception {
-    Configuration conf = new Configuration(cluster.getConf());
-    JTProtocol wovenClient = cluster.getJTClient().getProxy();
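-    // Configuring the control action makes the job's tasks wait until a
-    // FinishTaskControlAction is sent to their tasktracker, so the test can
-    // inspect them while they are still running.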
-    FinishTaskControlAction.configureControlActionForJob(conf);
-    SleepJob job = new SleepJob();
-    job.setConf(conf);
-
-    Job rJob = job.createJob(1, 1, 100, 100, 100, 100);
-    JobClient client = cluster.getJTClient().getClient();
-    rJob.submit();
-    RunningJob rJob1 =
-        client.getJob(org.apache.hadoop.mapred.JobID.downgrade(rJob.getJobID()));
-    JobID id = rJob.getJobID();
-
-    JobInfo jInfo = wovenClient.getJobInfo(id);
-
-    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
-      Thread.sleep(1000);
-      jInfo = wovenClient.getJobInfo(id);
-    }
-
-    LOG.info("Waiting till job starts running one map");
-
-    TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
-    boolean isOneTaskStored = false;
-    String sometaskpid = null;
-    org.apache.hadoop.mapreduce.TaskAttemptID sometaskId = null;
-    TTClient myCli = null;
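-    // For every regular (non-setup/cleanup) task, verify the TT-side task
-    // view (conf, user, progress, run state) and then signal it to finish.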
-    for (TaskInfo info : myTaskInfos) {
-      if (!info.isSetupOrCleanup()) {
-        String[] taskTrackers = info.getTaskTrackers();
-        for (String taskTracker : taskTrackers) {
-          TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
-          TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
-          TaskID taskId = info.getTaskID();
-          TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(taskId);
-          Assert.assertNotNull(ttTaskInfo);
-          Assert.assertNotNull(ttTaskInfo.getConf());
-          Assert.assertNotNull(ttTaskInfo.getUser());
-          Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() >= 0.0);
-          Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() <= 1.0);
-          // Get the pid of the task attempt. The task need not have
-          // reported its pid by the time we check, so keep polling
-          // until it is reported.
-          String pid = ttTaskInfo.getPid();
-          int i = 1;
-          while (pid.isEmpty()) {
-            Thread.sleep(1000);
-            LOG.info("Waiting for task to report its pid back");
-            ttTaskInfo = ttCli.getProxy().getTask(taskId);
-            pid = ttTaskInfo.getPid();
-            if (i == 40) {
-              Assert.fail("The task pid not reported for 40 seconds.");
-            }
-            i++;
-          }
-          if (!isOneTaskStored) {
-            sometaskpid = pid;
-            sometaskId = ttTaskInfo.getTaskStatus().getTaskID();
-            myCli = ttCli;
-            isOneTaskStored = true;
-          }
-          LOG.info("verified task progress to be between 0 and 1");
-          State state = ttTaskInfo.getTaskStatus().getRunState();
-          if (ttTaskInfo.getTaskStatus().getProgress() < 1.0
-              && ttTaskInfo.getTaskStatus().getProgress() > 0.0) {
-            Assert.assertEquals(TaskStatus.State.RUNNING, state);
-            LOG.info("verified run state as " + state);
-          }
-          FinishTaskControlAction action =
-              new FinishTaskControlAction(org.apache.hadoop.mapred.TaskID
-                  .downgrade(info.getTaskID()));
-          ttCli.getProxy().sendAction(action);
-        }
-      }
-    }
-    rJob.killJob();
-    int i = 1;
-    while (!rJob.isComplete()) {
-      Thread.sleep(1000);
-      if (i == 40) {
-        Assert
-            .fail("The job not completed within 40 seconds after killing it.");
-      }
-      i++;
-    }
-    TTTaskInfo myTaskInfo = myCli.getProxy().getTask(sometaskId.getTaskID());
-    i = 0;
-    while (myTaskInfo != null && !myTaskInfo.getPid().isEmpty()) {
-      LOG.info("sleeping till task is retired from TT memory");
-      Thread.sleep(1000);
-      myTaskInfo = myCli.getProxy().getTask(sometaskId.getTaskID());
-      if (i == 40) {
-        Assert
-            .fail("Task not retired from TT memory within 40 seconds of job completeing");
-      }
-      i++;
-    }
-    Assert.assertFalse(myCli.getProxy().isProcessTreeAlive(sometaskpid));
-  }
-
-  @Test
-  public void testClusterRestart() throws Exception {
-    cluster.stop();
-    // Wait for the daemons to actually go down after the stop.
-    AbstractDaemonClient cli = cluster.getJTClient();
-    int i = 1;
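-    // Ping until the JobTracker stops responding; an exception from ping()
-    // means the daemon is down, so break out of the loop.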
-    while (i < 40) {
-      try {
-        cli.ping();
-        Thread.sleep(1000);
-        i++;
-      } catch (Exception e) {
-        break;
-      }
-    }
-    if (i >= 40) {
-      Assert.fail("JT on " + cli.getHostName() + " Should have been down.");
-    }
-    i = 1;
-    for (AbstractDaemonClient tcli : cluster.getTTClients()) {
-      i = 1;
-      while (i < 40) {
-        try {
-          tcli.ping();
-          Thread.sleep(1000);
-          i++;
-        } catch (Exception e) {
-          break;
-        }
-      }
-      if (i >= 40) {
-        Assert.fail("TT on " + tcli.getHostName() + " Should have been down.");
-      }
-    }
-    cluster.start();
-    cli = cluster.getJTClient();
-    i = 1;
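-    // After the restart, ping until the JobTracker responds; a successful
-    // ping() means the daemon is back up.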
-    while (i < 40) {
-      try {
-        cli.ping();
-        break;
-      } catch (Exception e) {
-        i++;
-        Thread.sleep(1000);
-        LOG.info("Waiting for Jobtracker on host : "
-            + cli.getHostName() + " to come up.");
-      }
-    }
-    if (i >= 40) {
-      Assert.fail("JT on " + cli.getHostName() + " Should have been up.");
-    }
-    for (AbstractDaemonClient tcli : cluster.getTTClients()) {
-      i = 1;
-      while (i < 40) {
-        try {
-          tcli.ping();
-          break;
-        } catch (Exception e) {
-          i++;
-          Thread.sleep(1000);
-          LOG.info("Waiting for Tasktracker on host : "
-              + tcli.getHostName() + " to come up.");
-        }
-      }
-      if (i >= 40) {
-        Assert.fail("TT on " + tcli.getHostName() + " Should have been Up.");
-      }
-    }
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestControlledJob.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestControlledJob.java
deleted file mode 100644
index f43c6fd..0000000
--- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestControlledJob.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import junit.framework.Assert;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.SleepJob;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.mapreduce.test.system.TTClient;
-import org.apache.hadoop.mapreduce.test.system.TaskInfo;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestControlledJob {
-  private MRCluster cluster;
-
-  private static final Log LOG = LogFactory.getLog(TestControlledJob.class);
-
-  public TestControlledJob() throws Exception {
-    cluster = MRCluster.createCluster(new Configuration());
-  }
-
-  @Before
-  public void before() throws Exception {
-    cluster.setUp();
-  }
-
-  @After
-  public void after() throws Exception {
-    cluster.tearDown();
-  }
-
-  @Test
-  public void testControlledJob() throws Exception {
-    Configuration conf = new Configuration(cluster.getConf());
-    JTProtocol wovenClient = cluster.getJTClient().getProxy();
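-    // Configure the control action so that the map task holds until the
-    // test explicitly signals it to finish further below.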
-    FinishTaskControlAction.configureControlActionForJob(conf);
-    SleepJob job = new SleepJob();
-    job.setConf(conf);
-
-    Job slpJob = job.createJob(1, 0, 100, 100, 100, 100);
-    slpJob.submit();
-    JobClient client = cluster.getJTClient().getClient();
-
-    RunningJob rJob =
-        client.getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob
-            .getJobID()));
-    JobID id = rJob.getID();
-
-    JobInfo jInfo = wovenClient.getJobInfo(id);
-
-    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
-      Thread.sleep(1000);
-      jInfo = wovenClient.getJobInfo(id);
-    }
-
-    LOG.info("Waiting till job starts running one map");
-    jInfo = wovenClient.getJobInfo(id);
-    Assert.assertEquals(jInfo.runningMaps(), 1);
-
-    LOG.info("waiting for another cycle to "
-        + "check if the maps dont finish off");
-    Thread.sleep(1000);
-    jInfo = wovenClient.getJobInfo(id);
-    Assert.assertEquals(jInfo.runningMaps(), 1);
-
-    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
-
-    for (TaskInfo info : taskInfos) {
-      LOG.info("constructing control action to signal task to finish");
-      FinishTaskControlAction action =
-          new FinishTaskControlAction(TaskID.downgrade(info.getTaskID()));
-      for (TTClient cli : cluster.getTTClients()) {
-        cli.getProxy().sendAction(action);
-      }
-    }
-
-    jInfo = wovenClient.getJobInfo(id);
-    int i = 1;
-    if (jInfo != null) {
-      while (!jInfo.getStatus().isJobComplete()) {
-        Thread.sleep(1000);
-        jInfo = wovenClient.getJobInfo(id);
-        if (jInfo == null) {
-          break;
-        }
-        if (i > 40) {
-          Assert.fail("Controlled Job with ID : "
-              + jInfo.getID()
-              + " has not completed in 40 seconds after signalling.");
-        }
-        i++;
-      }
-    }
-    LOG.info("Job sucessfully completed after signalling!!!!");
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java
deleted file mode 100644
index 35c08d0..0000000
--- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java
+++ /dev/null
@@ -1,346 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataOutputStream;
-import java.net.URI;
-import java.util.Collection;
-import java.util.ArrayList;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.TTClient;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-import org.apache.hadoop.mapreduce.test.system.TaskInfo;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.UtilsForTests;
-
-import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
-import org.apache.hadoop.filecache.DistributedCache;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.SleepJob;
-
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-
-/**
- * Verify the Distributed Cache functionality. This test scenario covers the
- * behaviour of a distributed cache file that is modified between being
- * accessed by at most two jobs. Once a job uses a distributed cache file,
- * that file is stored in mapred.local.dir. If the next job uses the same
- * file but with a different timestamp, the file is stored again. So, if two
- * jobs choose the same tasktracker for their job execution, the distributed
- * cache file should be found twice.
- * 
- * This testcase runs a job with a distributed cache file. The handle of the
- * tasktracker corresponding to every task is obtained and checked for the
- * presence of the distributed cache file with proper permissions in the
- * proper directory. When the job runs again and one of its tasks hits the
- * same tasktracker that ran a task of the previous job, the file should be
- * uploaded again and the task should not use the old file. This is verified.
- */
-
-public class TestDistributedCacheModifiedFile {
-
-  private static MRCluster cluster = null;
-  private static FileSystem dfs = null;
-  private static FileSystem ttFs = null;
-  private static JobClient client = null;
-  private static FsPermission permission = new FsPermission((short) 00777);
-
-  private static String uriPath = "hdfs:///tmp/test.txt";
-  private static final Path URIPATH = new Path(uriPath);
-  private String distributedFileName = "test.txt";
-
-  static final Log LOG =
-      LogFactory.getLog(TestDistributedCacheModifiedFile.class);
-
-  public TestDistributedCacheModifiedFile() throws Exception {
-  }
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    cluster = MRCluster.createCluster(new Configuration());
-    cluster.setUp();
-    client = cluster.getJTClient().getClient();
-    dfs = client.getFs();
-    // Deleting the file if it already exists
-    dfs.delete(URIPATH, true);
-
-    Collection<TTClient> tts = cluster.getTTClients();
-    // Stopping all TTs
-    for (TTClient tt : tts) {
-      tt.kill();
-    }
-    // Starting all TTs
-    for (TTClient tt : tts) {
-      tt.start();
-    }
-    // Waiting for 5 seconds to make sure tasktrackers are ready
-    Thread.sleep(5000);
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    cluster.tearDown();
-    dfs.delete(URIPATH, true);
-
-    Collection<TTClient> tts = cluster.getTTClients();
-    // Stopping all TTs
-    for (TTClient tt : tts) {
-      tt.kill();
-    }
-    // Starting all TTs
-    for (TTClient tt : tts) {
-      tt.start();
-    }
-  }
-
-  /**
-   * Tests the distributed cache behaviour for a modified file.
-   */
-  @Test
-  public void testDistributedCache() throws Exception {
-    Configuration conf = new Configuration(cluster.getConf());
-    JTProtocol wovenClient = cluster.getJTClient().getProxy();
-
-    // This counter will check for count of a loop,
-    // which might become infinite.
-    int count = 0;
-    // This boolean will decide whether to run job again
-    boolean continueLoop = true;
-    // counter for job Loop
-    int countLoop = 0;
-    // This counter increases with all the tasktrackers in which tasks ran
-    int taskTrackerCounter = 0;
-    // This will store all the tasktrackers in which tasks ran
-    ArrayList<String> taskTrackerCollection = new ArrayList<String>();
-    // This boolean tells if two tasks ran on the same tasktracker or not
-    boolean taskTrackerFound = false;
-
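-    // Run the job repeatedly until one of its tasks lands on a tasktracker
-    // that already ran a task of a previous run, so that the re-uploaded
-    // cache file can be verified there.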
-    do {
-      SleepJob job = new SleepJob();
-      job.setConf(conf);
-      Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100);
-
-      // Before starting, Modify the file
-      String input = "This will be the content of\n" + "distributed cache\n";
-      // Creating the path with the file
-      DataOutputStream file =
-          UtilsForTests.createTmpFileDFS(dfs, URIPATH, permission, input);
-
-      DistributedCache.createSymlink(conf);
-      URI uri = URI.create(uriPath);
-      DistributedCache.addCacheFile(uri, conf);
-      JobConf jconf = new JobConf(conf);
-
-      // Controls the job till all verification is done
-      FinishTaskControlAction.configureControlActionForJob(conf);
-
-      slpJob.submit();
-      // Submitting the job
-      RunningJob rJob =
-          cluster.getJTClient().getClient().getJob(
-              org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID()));
-
-      // counter for job Loop
-      countLoop++;
-
-      TTClient tClient = null;
-      JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
-      LOG.info("jInfo is :" + jInfo);
-
-      // Assert if jobInfo is null
-      Assert.assertNotNull("jobInfo is null", jInfo);
-
-      // Wait for the job to start running.
-      count = 0;
-      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
-        UtilsForTests.waitFor(10000);
-        count++;
-        jInfo = wovenClient.getJobInfo(rJob.getID());
-        // If the count goes beyond a point, fail the test. This is to
-        // avoid an infinite loop under unforeseen circumstances.
-        if (count > 10) {
-          Assert.fail("job has not reached running state for more than "
-              + "100 seconds. Failing at this point");
-        }
-      }
-
-      LOG.info("job id is :" + rJob.getID().toString());
-
-      TaskInfo[] taskInfos =
-          cluster.getJTClient().getProxy().getTaskInfo(rJob.getID());
-
-      boolean distCacheFileIsFound;
-
-      for (TaskInfo taskInfo : taskInfos) {
-        distCacheFileIsFound = false;
-        String[] taskTrackers = taskInfo.getTaskTrackers();
-        for (String taskTracker : taskTrackers) {
-          // Formatting tasktracker to get just its FQDN
-          taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
-          LOG.info("taskTracker is :" + taskTracker);
-
-          // The tasktrackerFound variable is initialized
-          taskTrackerFound = false;
-
-          // This will be entered from the second job onwards
-          if (countLoop > 1) {
-            if (taskTracker != null) {
-              continueLoop = taskTrackerCollection.contains(taskTracker);
-            }
-            if (continueLoop) {
-              taskTrackerFound = true;
-            }
-          }
-          // Collecting the tasktrackers
-          if (taskTracker != null)
-            taskTrackerCollection.add(taskTracker);
-
-          // We have looped through twice to look for tasks getting
-          // submitted on the same tasktrackers. The same tasktracker may
-          // not have been hit for subsequent jobs because there are many
-          // tasktrackers. So, the testcase has to stop here.
-          if (countLoop > 1) {
-            continueLoop = false;
-          }
-
-          tClient = cluster.getTTClient(taskTracker);
-
-          // tClient may be null because the task is already dead. Ex: setup
-          if (tClient == null) {
-            continue;
-          }
-
-          String[] localDirs = tClient.getMapredLocalDirs();
-          int distributedFileCount = 0;
-          // Go to every single path
-          for (String localDir : localDirs) {
-            // Public Distributed cache will always be stored under
-            // mapred.local.dir/tasktracker/archive
-            localDir =
-                localDir
-                    + Path.SEPARATOR
-                    + TaskTracker.getPublicDistributedCacheDir();
-            LOG.info("localDir is : " + localDir);
-
-            // Get file status of all the directories
-            // and files under that path.
-            FileStatus[] fileStatuses =
-                tClient.listStatus(localDir, true, true);
-            for (FileStatus fileStatus : fileStatuses) {
-              Path path = fileStatus.getPath();
-              LOG.info("path is :" + path.toString());
-              // Checking if the received path ends with
-              // the distributed filename
-              distCacheFileIsFound =
-                  (path.toString()).endsWith(distributedFileName);
-              // If the file is found, check its permission and count it.
-              if (distCacheFileIsFound) {
-                LOG.info("PATH found is :" + path.toString());
-                distributedFileCount++;
-                String filename = path.getName();
-                FsPermission fsPerm = fileStatus.getPermission();
-                Assert.assertTrue("File Permission is not 777", fsPerm
-                    .equals(new FsPermission("777")));
-              }
-            }
-          }
-
-          LOG.debug("The distributed FileCount is :" + distributedFileCount);
-          LOG.debug("The taskTrackerFound is :" + taskTrackerFound);
-
-          // If distributed cache is modified in dfs
-          // between two job runs, it can be present more than once
-          // on any of the tasktrackers on which the job ran.
-          if (distributedFileCount != 2 && taskTrackerFound) {
-            Assert.fail("The distributed cache file has to be two. "
-                + "But found was " + distributedFileCount);
-          } else if (distributedFileCount > 1 && !taskTrackerFound) {
-            Assert.fail("The distributed cache file cannot more than one."
-                + " But found was " + distributedFileCount);
-          } else if (distributedFileCount < 1)
-            Assert.fail("The distributed cache file is less than one. "
-                + "But found was " + distributedFileCount);
-          if (!distCacheFileIsFound) {
-            Assert.assertEquals(
-                "The distributed cache file does not exist",
-                distCacheFileIsFound, false);
-          }
-        }
-      }
-      // Allow the job to continue through MR control job.
-      for (TaskInfo taskInfoRemaining : taskInfos) {
-        FinishTaskControlAction action =
-            new FinishTaskControlAction(TaskID.downgrade(taskInfoRemaining
-                .getTaskID()));
-        Collection<TTClient> tts = cluster.getTTClients();
-        for (TTClient cli : tts) {
-          cli.getProxy().sendAction(action);
-        }
-      }
-
-      // Killing the job because all the verification needed
-      // for this testcase is completed.
-      rJob.killJob();
-
-      // Waiting for 3 seconds for cleanup to start
-      Thread.sleep(3000);
-
-      // Getting the last cleanup task's tasktracker also, as
-      // distributed cache gets uploaded even during cleanup.
-      TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(rJob.getID());
-      if (myTaskInfos != null) {
-        for (TaskInfo info : myTaskInfos) {
-          if (info.isSetupOrCleanup()) {
-            String[] taskTrackers = info.getTaskTrackers();
-            for (String taskTracker : taskTrackers) {
-              // Formatting tasktracker to get just its FQDN
-              taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
-              LOG.info("taskTracker is :" + taskTracker);
-              // Collecting the tasktrackers
-              if (taskTracker != null)
-                taskTrackerCollection.add(taskTracker);
-            }
-          }
-        }
-      }
-
-      // Making sure that the job is complete.
-      while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
-        Thread.sleep(10000);
-        jInfo = wovenClient.getJobInfo(rJob.getID());
-      }
-
-    } while (continueLoop);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java
deleted file mode 100644
index 5d8ff49..0000000
--- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java
+++ /dev/null
@@ -1,284 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataOutputStream;
-import java.net.URI;
-import java.util.Collection;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.TTClient;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-import org.apache.hadoop.mapreduce.test.system.TaskInfo;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.UtilsForTests;
-
-import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
-import org.apache.hadoop.filecache.DistributedCache;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.SleepJob;
-
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-
-/**
- * Verify the Distributed Cache functionality.
- * This test scenario covers the behaviour of a distributed cache file
- * when the file is private. Once a job uses a distributed cache file
- * with private permissions, that file is stored in mapred.local.dir,
- * under a directory that has the same name as the job submitter's
- * username. The directory has 700 permissions and the file under it
- * should have 777 permissions.
- */
-
-public class TestDistributedCachePrivateFile {
-
-  private static MRCluster cluster = null;
-  private static FileSystem dfs = null;
-  private static JobClient client = null;
-  private static FsPermission permission = new FsPermission((short)00770);
-
-  private static String uriPath = "hdfs:///tmp/test.txt";
-  private static final Path URIPATH = new Path(uriPath);
-  private String distributedFileName = "test.txt";
-
-  static final Log LOG =
-      LogFactory.getLog(TestDistributedCachePrivateFile.class);
-
-  public TestDistributedCachePrivateFile() throws Exception {
-  }
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    cluster = MRCluster.createCluster(new Configuration());
-    cluster.setUp();
-    client = cluster.getJTClient().getClient();
-    dfs = client.getFs();
-    //Deleting the file if it already exists
-    dfs.delete(URIPATH, true);
-
-    Collection<TTClient> tts = cluster.getTTClients();
-    //Stopping all TTs
-    for (TTClient tt : tts) {
-      tt.kill();
-    }
-    //Starting all TTs
-    for (TTClient tt : tts) {
-      tt.start();
-    }
-
-    String input = "This will be the content of\n" + "distributed cache\n";
-    //Creating the path with the file
-    DataOutputStream file = 
-        UtilsForTests.createTmpFileDFS(dfs, URIPATH, permission, input);
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    cluster.tearDown();
-    dfs.delete(URIPATH, true);
-    
-    Collection<TTClient> tts = cluster.getTTClients();
-    //Stopping all TTs
-    for (TTClient tt : tts) {
-      tt.kill();
-    }
-    //Starting all TTs
-    for (TTClient tt : tts) {
-      tt.start();
-    }
-  }
-
-  /**
-   * Tests the distributed cache behaviour for a private file.
-   */
-  @Test
-  public void testDistributedCache() throws Exception {
-    Configuration conf = new Configuration(cluster.getConf());
-    JTProtocol wovenClient = cluster.getJTClient().getProxy();
-
-    //This counter will check for count of a loop,
-    //which might become infinite.
-    int count = 0;
-
-    SleepJob job = new SleepJob();
-    job.setConf(conf);
-    Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100);
-
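-    // Add the HDFS file created in setUp() (permission 770, hence private)
-    // to the job's distributed cache.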
-    DistributedCache.createSymlink(conf);
-    URI uri = URI.create(uriPath);
-    DistributedCache.addCacheFile(uri, conf);
-    JobConf jconf = new JobConf(conf);
-
-    //Controls the job till all verification is done 
-    FinishTaskControlAction.configureControlActionForJob(conf);
-
-    //Submitting the job
-    slpJob.submit();
-    RunningJob rJob =
-        cluster.getJTClient().getClient().getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID()));
-
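-    // The submitter's username is needed to locate the private distributed
-    // cache directory (mapred.local.dir/taskTracker/<username>).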
-    JobStatus[] jobStatus = client.getAllJobs();
-    String userName = jobStatus[0].getUsername();
-
-    TTClient tClient = null;
-    JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
-    LOG.info("jInfo is :" + jInfo);
-
-    //Assert if jobInfo is null
-    Assert.assertNotNull("jobInfo is null", jInfo);
-
-    //Wait for the job to start running.
-    count = 0;
-    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
-      UtilsForTests.waitFor(10000);
-      count++;
-      jInfo = wovenClient.getJobInfo(rJob.getID());
-      //If the count goes beyond a point, fail the test; this is to avoid
-      //an infinite loop under unforeseen circumstances.
-      if (count > 10) {
-        Assert.fail("job has not reached running state for more than " +
-            "100 seconds. Failing at this point");
-      }
-    }
-
-    LOG.info("job id is :" + rJob.getID().toString());
-
-    TaskInfo[] taskInfos = cluster.getJTClient().getProxy()
-           .getTaskInfo(rJob.getID());
-
-    boolean distCacheFileIsFound;
-
-    for (TaskInfo taskInfo : taskInfos) {
-      distCacheFileIsFound = false;
-      String[] taskTrackers = taskInfo.getTaskTrackers();
-
-      for(String taskTracker : taskTrackers) {
-        //Getting the exact FQDN of the tasktracker from
-        //the tasktracker string.
-        taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
-        tClient =  cluster.getTTClient(taskTracker);
-        String[] localDirs = tClient.getMapredLocalDirs();
-        int distributedFileCount = 0;
-        String localDirOnly = null;
-
-        boolean FileNotPresentForThisDirectoryPath = false;
-
-        //Go to every single path
-        for (String localDir : localDirs) {
-          FileNotPresentForThisDirectoryPath = false;
-          localDirOnly = localDir;
-
-          //The job submitter's directory is always stored under
-          //mapred.local.dir/taskTracker/<username>
-          localDirOnly = localDir + Path.SEPARATOR + TaskTracker.SUBDIR + 
-              Path.SEPARATOR +  userName;
-
-          //Private Distributed cache will always be stored under
-          //mapred.local.dir/taskTracker/<username>/distcache
-          //Checking for username directory to check if it has the
-          //proper permissions
-          localDir = localDir + Path.SEPARATOR +
-                  TaskTracker.getPrivateDistributedCacheDir(userName);
-
-          FileStatus fileStatusMapredLocalDirUserName = null;
-
-          try {
-            fileStatusMapredLocalDirUserName = tClient.
-                            getFileStatus(localDirOnly, true);
-          } catch (Exception e) {
-            LOG.info("LocalDirOnly :" + localDirOnly + " not found");
-            FileNotPresentForThisDirectoryPath = true;
-          }
-
-          //The file will only be stored under one of the mapred.local.dir
-          //directories. If other paths were hit, just continue.
-          if (FileNotPresentForThisDirectoryPath)
-            continue;
-
-          Path pathMapredLocalDirUserName = 
-              fileStatusMapredLocalDirUserName.getPath();
-          FsPermission fsPermMapredLocalDirUserName =
-              fileStatusMapredLocalDirUserName.getPermission();
-          Assert.assertTrue("Directory Permission is not 700",
-            fsPermMapredLocalDirUserName.equals(new FsPermission("700")));
-
-          //Get file status of all the directories 
-          //and files under that path.
-          FileStatus[] fileStatuses = tClient.listStatus(localDir, 
-              true, true);
-          for (FileStatus  fileStatus : fileStatuses) {
-            Path path = fileStatus.getPath();
-            LOG.info("path is :" + path.toString());
-            //Checking if the received path ends with 
-            //the distributed filename
-            distCacheFileIsFound = (path.toString()).
-                endsWith(distributedFileName);
-            //If the file is found, check its permission and count it.
-            if (distCacheFileIsFound){
-              LOG.info("PATH found is :" + path.toString());
-              distributedFileCount++;
-              String filename = path.getName();
-              FsPermission fsPerm = fileStatus.getPermission();
-              Assert.assertTrue("File Permission is not 777",
-                fsPerm.equals(new FsPermission("777")));
-            }
-          }
-        }
-
-        LOG.info("Distributed File count is :" + distributedFileCount);
-
-        if (distributedFileCount > 1) {
-          Assert.fail("The distributed cache file is more than one");
-        } else if (distributedFileCount < 1)
-          Assert.fail("The distributed cache file is less than one");
-        if (!distCacheFileIsFound) {
-          Assert.assertEquals("The distributed cache file does not exist", 
-              distCacheFileIsFound, false);
-        }
-      }
-
-      //Allow the job to continue through MR control job.
-      for (TaskInfo taskInfoRemaining : taskInfos) {
-        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
-           .downgrade(taskInfoRemaining.getTaskID()));
-        Collection<TTClient> tts = cluster.getTTClients();
-        for (TTClient cli : tts) {
-          cli.getProxy().sendAction(action);
-        }
-      }
-
-      //Killing the job because all the verification needed
-      //for this testcase is completed.
-      rJob.killJob();
-    }
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java
deleted file mode 100644
index 7a18d64..0000000
--- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataOutputStream;
-import java.net.URI;
-import java.util.Collection;
-import java.util.ArrayList;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.TTClient;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-import org.apache.hadoop.mapreduce.test.system.TaskInfo;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.UtilsForTests;
-
-import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
-import org.apache.hadoop.filecache.DistributedCache;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.SleepJob;
-
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-
-/**
- * Verify the Distributed Cache functionality. This test scenario covers the
- * behaviour of a distributed cache file that is not modified between being
- * accessed by at most two jobs. Once a job uses a distributed cache file,
- * that file is stored in mapred.local.dir. If the next job uses the same
- * file, it is not stored again. So, if two jobs choose the same tasktracker
- * for their job execution, the distributed cache file should not be found
- * twice.
- * 
- * This testcase runs a job with a distributed cache file. The handle of the
- * tasktracker corresponding to every task is obtained and checked for the
- * presence of the distributed cache file with proper permissions in the
- * proper directory. When the job runs again and one of its tasks hits the
- * same tasktracker that ran a task of the previous job, the file should not
- * be uploaded again and the task should use the old file. This is verified.
- */
-
-public class TestDistributedCacheUnModifiedFile {
-
-  private static MRCluster cluster = null;
-  private static FileSystem dfs = null;
-  private static FileSystem ttFs = null;
-  private static JobClient client = null;
-  private static FsPermission permission = new FsPermission((short) 00777);
-
-  private static String uriPath = "hdfs:///tmp/test.txt";
-  private static final Path URIPATH = new Path(uriPath);
-  private String distributedFileName = "test.txt";
-
-  static final Log LOG =
-      LogFactory.getLog(TestDistributedCacheUnModifiedFile.class);
-
-  public TestDistributedCacheUnModifiedFile() throws Exception {
-  }
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    cluster = MRCluster.createCluster(new Configuration());
-    cluster.setUp();
-    client = cluster.getJTClient().getClient();
-    dfs = client.getFs();
-    // Deleting the file if it already exists
-    dfs.delete(URIPATH, true);
-
-    Collection<TTClient> tts = cluster.getTTClients();
-    // Stopping all TTs
-    for (TTClient tt : tts) {
-      tt.kill();
-    }
-    // Starting all TTs
-    for (TTClient tt : tts) {
-      tt.start();
-    }
-
-    // Waiting for 5 seconds to make sure tasktrackers are ready
-    Thread.sleep(5000);
-
-    String input = "This will be the content of\n" + "distributed cache\n";
-    // Creating the path with the file
-    DataOutputStream file =
-        UtilsForTests.createTmpFileDFS(dfs, URIPATH, permission, input);
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    cluster.tearDown();
-    dfs.delete(URIPATH, true);
-
-    Collection<TTClient> tts = cluster.getTTClients();
-    // Stopping all TTs
-    for (TTClient tt : tts) {
-      tt.kill();
-    }
-    // Starting all TTs
-    for (TTClient tt : tts) {
-      tt.start();
-    }
-  }
-
-  /**
-   * Tests the distributed cache behaviour for an unmodified file.
-   */
-  @Test
-  public void testDistributedCache() throws Exception {
-    Configuration conf = new Configuration(cluster.getConf());
-    JTProtocol wovenClient = cluster.getJTClient().getProxy();
-
-    // This counter will check for count of a loop,
-    // which might become infinite.
-    int count = 0;
-    // This boolean will decide whether to run job again
-    boolean continueLoop = true;
-    // counter for job Loop
-    int countLoop = 0;
-    // This counter increases with all the tasktrackers in which tasks ran
-    int taskTrackerCounter = 0;
-    // This will store all the tasktrackers in which tasks ran
-    ArrayList<String> taskTrackerCollection = new ArrayList<String>();
-
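-    // Run the job repeatedly until one of its tasks lands on a tasktracker
-    // that already ran a task of a previous run, so it can be verified that
-    // the cache file was not uploaded a second time.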
-    do {
-      SleepJob job = new SleepJob();
-      job.setConf(conf);
-      Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100);
-
-      DistributedCache.createSymlink(conf);
-      URI uri = URI.create(uriPath);
-      DistributedCache.addCacheFile(uri, conf);
-      JobConf jconf = new JobConf(conf);
-
-      // Controls the job till all verification is done
-      FinishTaskControlAction.configureControlActionForJob(conf);
-
-      // Submitting the job
-      slpJob.submit();
-      RunningJob rJob =
-          cluster.getJTClient().getClient().getJob(
-              org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID()));
-
-      // counter for job Loop
-      countLoop++;
-
-      TTClient tClient = null;
-      JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
-      LOG.info("jInfo is :" + jInfo);
-
-      // Assert if jobInfo is null
-      Assert.assertNotNull("jobInfo is null", jInfo);
-
-      // Wait for the job to start running.
-      count = 0;
-      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
-        UtilsForTests.waitFor(10000);
-        count++;
-        jInfo = wovenClient.getJobInfo(rJob.getID());
-        // If the count goes beyond a point, fail the test. This is to
-        // avoid an infinite loop under unforeseen circumstances.
-        if (count > 10) {
-          Assert.fail("job has not reached running state for more than "
-              + "100 seconds. Failing at this point");
-        }
-      }
-
-      LOG.info("job id is :" + rJob.getID().toString());
-
-      TaskInfo[] taskInfos =
-          cluster.getJTClient().getProxy().getTaskInfo(rJob.getID());
-
-      boolean distCacheFileIsFound;
-
-      for (TaskInfo taskInfo : taskInfos) {
-        distCacheFileIsFound = false;
-        String[] taskTrackers = taskInfo.getTaskTrackers();
-        for (String taskTracker : taskTrackers) {
-          // Formatting tasktracker to get just its FQDN
-          taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
-          LOG.info("taskTracker is :" + taskTracker);
-
-          // This will be entered from the second job onwards
-          if (countLoop > 1) {
-            if (taskTracker != null) {
-              continueLoop = taskTrackerCollection.contains(taskTracker);
-            }
-            if (!continueLoop) {
-              break;
-            }
-          }
-
-          // Collecting the tasktrackers
-          if (taskTracker != null)
-            taskTrackerCollection.add(taskTracker);
-
-          // We have looped through enough times to look for tasks getting
-          // submitted on the same tasktrackers. The same tasktracker may
-          // not have been hit for subsequent jobs because there are many
-          // tasktrackers. So, the testcase has to stop here.
-          if (countLoop > 2) {
-            continueLoop = false;
-          }
-
-          tClient = cluster.getTTClient(taskTracker);
-
-          // tClient may be null because the task is already dead. Ex: setup
-          if (tClient == null) {
-            continue;
-          }
-
-          String[] localDirs = tClient.getMapredLocalDirs();
-          int distributedFileCount = 0;
-          // Go to every single path
-          for (String localDir : localDirs) {
-            // Public Distributed cache will always be stored under
-            // mapred.local.dir/tasktracker/archive
-            localDir =
-                localDir
-                    + Path.SEPARATOR
-                    + TaskTracker.getPublicDistributedCacheDir();
-            LOG.info("localDir is : " + localDir);
-
-            // Get file status of all the directories
-            // and files under that path.
-            FileStatus[] fileStatuses =
-                tClient.listStatus(localDir, true, true);
-            for (FileStatus fileStatus : fileStatuses) {
-              Path path = fileStatus.getPath();
-              LOG.info("path is :" + path.toString());
-              // Checking if the received path ends with
-              // the distributed filename
-              distCacheFileIsFound =
-                  (path.toString()).endsWith(distributedFileName);
-              // If the file is found, check its permission and count it.
-              if (distCacheFileIsFound) {
-                LOG.info("PATH found is :" + path.toString());
-                distributedFileCount++;
-                String filename = path.getName();
-                FsPermission fsPerm = fileStatus.getPermission();
-                Assert.assertTrue("File Permission is not 777", fsPerm
-                    .equals(new FsPermission("777")));
-              }
-            }
-          }
-
-          // Since distributed cache is unmodified in dfs
-          // between two job runs, it should not be present more than once
-          // on any of the tasktrackers on which the job ran.
-          if (distributedFileCount > 1) {
-            Assert.fail("The distributed cache file is more than one");
-          } else if (distributedFileCount < 1)
-            Assert.fail("The distributed cache file is less than one");
-          if (!distCacheFileIsFound) {
-            Assert.assertEquals(
-                "The distributed cache file does not exist",
-                distCacheFileIsFound, false);
-          }
-        }
-      }
-      // Allow the job to continue through MR control job.
-      for (TaskInfo taskInfoRemaining : taskInfos) {
-        FinishTaskControlAction action =
-            new FinishTaskControlAction(TaskID.downgrade(taskInfoRemaining
-                .getTaskID()));
-        Collection<TTClient> tts = cluster.getTTClients();
-        for (TTClient cli : tts) {
-          cli.getProxy().sendAction(action);
-        }
-      }
-
-      // Killing the job because all the verification needed
-      // for this testcase is completed.
-      rJob.killJob();
-    } while (continueLoop);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestFileOwner.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestFileOwner.java
deleted file mode 100644
index f8f2cda..0000000
--- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestFileOwner.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.SleepJob;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
-import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.mapreduce.test.system.TTClient;
-import org.apache.hadoop.mapreduce.test.system.TTInfo;
-import org.apache.hadoop.mapreduce.test.system.TTTaskInfo;
-import org.apache.hadoop.mapreduce.test.system.TaskInfo;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestFileOwner {
-  public static MRCluster cluster;
-
-  private StringBuffer jobIdDir = new StringBuffer();
-  private JTProtocol wovenClient = null;
-  private static final Log LOG = LogFactory.getLog(TestFileOwner.class);
-  private String taskController = null;
-  private final FsPermission PERM_777 = new FsPermission("777");
-  private final FsPermission PERM_755 = new FsPermission("755");
-  private final FsPermission PERM_644 = new FsPermission("644");
-
-  @BeforeClass
-  public static void setUp() throws java.lang.Exception {
-    cluster = MRCluster.createCluster(new Configuration());
-    cluster.setUp();
-  }
-
-  /*
-   * This test checks the file permissions of local files in
-   * mapred.local.dir. Job control is used so that the tasks wait
-   * and do not complete until they are signaled.
-   * 
-   * @throws Exception in case of test errors
-   */
-  @Test
-  public void testFilePermission() throws Exception {
-    wovenClient = cluster.getJTClient().getProxy();
-    Configuration conf = new Configuration(cluster.getConf());
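-    // Configure the job so that each task blocks until it receives an
-    // explicit FinishTaskControlAction from the test.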
-    FinishTaskControlAction.configureControlActionForJob(conf);
-    SleepJob job = new SleepJob();
-    job.setConf(conf);
-    Job slpJob = job.createJob(1, 0, 100, 100, 100, 100);
-    JobConf jconf = new JobConf(conf);
-    slpJob.submit();
-    RunningJob rJob =
-        cluster.getJTClient().getClient().getJob(
-            org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID()));
-    taskController = conf.get(TTConfig.TT_TASK_CONTROLLER);
-    // get the job info so we can get the env variables from the daemon.
-    // Now wait for the task to be in the running state; only then will the
-    // directories be created.
-    JobInfo info = wovenClient.getJobInfo(rJob.getID());
-    Assert.assertNotNull("JobInfo is null", info);
-    JobID id = rJob.getID();
-    while (info.runningMaps() != 1) {
-      Thread.sleep(1000);
-      info = wovenClient.getJobInfo(id);
-    }
-    TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
-    for (TaskInfo tInfo : myTaskInfos) {
-      if (!tInfo.isSetupOrCleanup()) {
-        String[] taskTrackers = tInfo.getTaskTrackers();
-        for (String taskTracker : taskTrackers) {
-          TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
-          TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
-          Assert.assertNotNull("TTClient instance is null", ttCli);
-          TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
-          Assert.assertNotNull("TTTaskInfo is null", ttTaskInfo);
-          while (ttTaskInfo.getTaskStatus().getRunState() != TaskStatus.State.RUNNING) {
-            Thread.sleep(100);
-            ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
-          }
-          testPermissionWithTaskController(ttCli, conf, info);
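-          // Release the held task: broadcast the finish control action to
-          // every task tracker so the signaled task is allowed to complete.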
-          FinishTaskControlAction action =
-              new FinishTaskControlAction(TaskID.downgrade(tInfo.getTaskID()));
-          for (TTClient cli : cluster.getTTClients()) {
-            cli.getProxy().sendAction(action);
-          }
-        }
-      }
-    }
-    JobInfo jInfo = wovenClient.getJobInfo(id);
-    jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
-    while (!jInfo.getStatus().isJobComplete()) {
-      Thread.sleep(100);
-      jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
-    }
-  }
-
-  private void testPermissionWithTaskController(
-      TTClient tClient, Configuration conf, JobInfo info) {
-    Assert.assertNotNull("TTclient is null", tClient);
-    FsPermission fsPerm = null;
-    String[] pathInfo = conf.getStrings(MRConfig.LOCAL_DIR);
-    for (int i = 0; i < pathInfo.length; i++) {
-      // First verify the jobid directory exists
-      jobIdDir = new StringBuffer();
-      String userName = null;
-      try {
-        JobStatus[] jobStatus = cluster.getJTClient().getClient().getAllJobs();
-        userName = jobStatus[0].getUsername();
-      } catch (Exception ex) {
-        LOG.error("Failed to get user name");
-        Assert.fail("Failed to get the userName");
-      }
-      jobIdDir.append(pathInfo[i]).append(Path.SEPARATOR);
-      jobIdDir.append(TaskTracker.getLocalJobDir(userName, info
-          .getID().toString()));
-      FileStatus[] fs = null;
-      try {
-        fs = tClient.listStatus(jobIdDir.toString(), true);
-      } catch (Exception ex) {
-        LOG.error("Failed to get the jobIdDir files " + ex);
-      }
-      Assert.assertTrue("FileStatus list is empty", fs.length != 0);
-      for (FileStatus file : fs) {
-        try {
-          String filename = file.getPath().getName();
-          if (filename.equals(TaskTracker.JOBFILE)) {
-            if (DefaultTaskController.class.getName().equals(taskController)) {
-              fsPerm = file.getPermission();
-              Assert.assertTrue("FilePermission failed for " + filename, fsPerm
-                  .equals(PERM_777));
-            }
-          }
-          if (filename.startsWith("attempt")) {
-            StringBuffer attemptDir = new StringBuffer(jobIdDir);
-            attemptDir.append(Path.SEPARATOR).append(filename);
-            if (tClient.getFileStatus(attemptDir.toString(), true) != null) {
-              FileStatus[] attemptFs =
-                  tClient.listStatus(attemptDir.toString(), true, true);
-              for (FileStatus attemptfz : attemptFs) {
-                Assert.assertNotNull("FileStatus is null", attemptfz);
-                fsPerm = attemptfz.getPermission();
-                Assert.assertNotNull("FsPermission is null", fsPerm);
-                if (DefaultTaskController.class.getName().equals(taskController)) {
-                  if (!attemptfz.isDir()) {
-                    Assert.assertTrue(
-                        "FilePermission failed for " + filename, fsPerm
-                            .equals(PERM_777));
-                  } else {
-                    Assert.assertTrue(
-                        "FilePermission failed for " + filename, fsPerm
-                            .equals(PERM_755));
-                  }
-                }
-              }
-            }
-          }
-          if (filename.equals(TaskTracker.TASKJARDIR)) {
-            StringBuffer jarsDir = new StringBuffer(jobIdDir);
-            jarsDir.append(Path.SEPARATOR).append(filename);
-            FileStatus[] jarsFs =
-                tClient.listStatus(jarsDir.toString(), true, true);
-            for (FileStatus jarsfz : jarsFs) {
-              Assert.assertNotNull("FileStatus is null", jarsfz);
-              fsPerm = jarsfz.getPermission();
-              Assert.assertNotNull("File permission is null", fsPerm);
-              if (DefaultTaskController.class.getName().equals(taskController)) {
-                if (!jarsfz.isDir()) {
-                  if (jarsfz.getPath().getName().equals("job.jar")) {
-                    Assert.assertTrue(
-                        "FilePermission failed for " + filename, fsPerm
-                            .equals(PERM_777));
-                  } else {
-                    Assert.assertTrue(
-                        "FilePermission failed for " + filename, fsPerm
-                            .equals(PERM_644));
-                  }
-                } else {
-                  Assert.assertTrue(
-                      "FilePermission failed for " + filename, fsPerm
-                          .equals(PERM_755));
-                }
-              }
-            }
-          }
-        } catch (Exception ex) {
-          LOG.error("The exception occurred while searching for a nonexistent "
-              + "file; ignoring and continuing. " + ex);
-        }
-      }// for loop ends
-    }// for loop ends
-  }
-
-  @AfterClass
-  public static void tearDown() throws java.lang.Exception {
-    cluster.tearDown();
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestJobKill.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestJobKill.java
deleted file mode 100644
index 92c07b2..0000000
--- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestJobKill.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.*;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import testjar.JobKillCommitter;
-
-public class TestJobKill {
-  private static final Log LOG = LogFactory.getLog(TestJobKill.class);
-  private JTProtocol wovenClient = null;
-  private static Path outDir = new Path("output");
-  private static Path inDir = new Path("input");
-  private static FileSystem fs = null;
-  private static MRCluster cluster;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    cluster = MRCluster.createCluster(new Configuration());
-    cluster.setUp();
-    fs = inDir.getFileSystem(cluster.getJTClient().getConf());
-    if(!fs.exists(inDir)){
-      fs.create(inDir);
-    }
-    if (fs.exists(outDir)) {
-      fs.delete(outDir,true);
-    }
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    if(fs.exists(inDir)) {
-      fs.delete(inDir,true);
-    }    
-    if (fs.exists(outDir)) {
-      fs.delete(outDir,true);
-    }
-    cluster.tearDown();
-  }
-
-  /*
-   * The intention of this test case is to verify job failure due to system
-   * exceptions: the exceptions are thrown intentionally and the job is
-   * verified to fail. At the end of the test, it is verified that the
-   * success file is not present in the HDFS location, since the success
-   * file should only exist if the job actually succeeded.
-   * 
-   * @throws Exception in case of test errors
-   */
-  @Test
-  public void testSystemJobKill() throws Exception {
-    wovenClient = cluster.getJTClient().getProxy();
-    Configuration conf = new Configuration(cluster.getConf());
-    conf.set(MRJobConfig.MAP_MAX_ATTEMPTS, "1");
-    conf.set(MRJobConfig.REDUCE_MAX_ATTEMPTS, "1");
-    // fail the mapper job
-    failJob(conf, JobKillCommitter.CommitterWithNoError.class, "JobMapperFail",
-        JobKillCommitter.MapperFail.class, JobKillCommitter.ReducerPass.class,
-        false);
-    // fail the reducer job
-    failJob(conf, JobKillCommitter.CommitterWithNoError.class,
-        "JobReducerFail", JobKillCommitter.MapperPass.class,
-        JobKillCommitter.ReducerFail.class,false);
-    // fail the set up job
-    failJob(conf, JobKillCommitter.CommitterWithFailSetup.class,
-        "JobSetupFail", JobKillCommitter.MapperPass.class,
-        JobKillCommitter.ReducerPass.class,false);
-    // fail the clean up job
-    failJob(conf, JobKillCommitter.CommitterWithFailCleanup.class,
-        "JobCleanupFail", JobKillCommitter.MapperPass.class,
-        JobKillCommitter.ReducerPass.class,false);
-  }
-
-  private void failJob(Configuration conf,
-      Class<? extends OutputCommitter> theClass, String confName,
-      Class<? extends Mapper> mapClass, Class<? extends Reducer> redClass,
-      boolean isUserKill)
-      throws Exception {
-    Job job = new Job(conf, confName);
-    job.setJarByClass(JobKillCommitter.class);
-    job.setMapperClass(mapClass);
-    job.setCombinerClass(redClass);
-    job.setMapOutputKeyClass(Text.class);
-    job.setMapOutputValueClass(Text.class);
-    job.setReducerClass(redClass);
-    job.setNumReduceTasks(1);
-    FileInputFormat.addInputPath(job, inDir);
-    FileOutputFormat.setOutputPath(job, outDir);
-    JobConf jconf = new JobConf(job.getConfiguration(), JobKillCommitter.class);
-    jconf.setOutputCommitter(theClass);
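-    // For system-failure cases the job is just submitted and checked to be in
-    // PREP state; for the user-kill case we wait for a map to run and then
-    // kill the job explicitly.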
-    if(!isUserKill)
-    {  
-      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
-      JobID id = rJob.getID();
-      JobInfo jInfo = wovenClient.getJobInfo(id);
-      Assert.assertTrue("Job is not in PREP state",
-          jInfo.getStatus().getRunState() == JobStatus.PREP);
-    }
-    else
-    {
-      //user kill job
-      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
-      JobInfo info = wovenClient.getJobInfo(rJob.getID());
-      Assert.assertNotNull("Job Info is null",info);
-      JobID id = rJob.getID();
-      while (info.runningMaps() != 1) {
-        Thread.sleep(1000);
-        info = wovenClient.getJobInfo(id);
-      }
-      rJob.killJob();
-    }
-    checkCleanup(jconf);
-    deleteOutputDir();
-  }
-  
-  /**
-   * This test kills the job by explicitly calling the kill API
-   * and makes sure that the cleanup happens.
-   * @throws Exception
-   */
-  @Test
-  public void testUserJobKill() throws Exception{
-    wovenClient = cluster.getJTClient().getProxy();
-    Configuration conf = new Configuration(cluster.getConf());
-    conf.set(MRJobConfig.MAP_MAX_ATTEMPTS, "1");
-    conf.set(MRJobConfig.REDUCE_MAX_ATTEMPTS, "1");
-    // run a sleeping mapper job and kill it as the user
-    failJob(conf, JobKillCommitter.CommitterWithNoError.class, "JobUserKill",
-        JobKillCommitter.MapperPassSleep.class, 
-        JobKillCommitter.ReducerPass.class,true);    
-  }
-
-  private void checkCleanup(JobConf conf) throws Exception {
-    if (outDir != null) {
-      if (fs.exists(outDir)) {
-        Path filePath = new Path(outDir,
-            FileOutputCommitter.SUCCEEDED_FILE_NAME);
-        // check to make sure the success file is not there since the job
-        // failed.
-        Assert.assertTrue("The success file is present when the job failed",
-            !fs.exists(filePath));
-      }
-    }
-  }
-
-  private void deleteOutputDir() throws Exception {
-    if (fs != null) {
-      fs.delete(outDir, true);
-    }
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestPushConfig.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestPushConfig.java
deleted file mode 100644
index 293edc0..0000000
--- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestPushConfig.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-import java.io.File;
-import java.io.FileOutputStream;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.test.system.AbstractDaemonClient;
-import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestPushConfig {
-  private static MRCluster cluster;
-  private String localConfDir = "localconf";
-  private static final Log LOG = LogFactory.getLog(
-      TestPushConfig.class.getName());
-  
-  @BeforeClass
-  public static void before() throws Exception {
-    String [] expExcludeList = new String[2];
-    expExcludeList[0] = "java.net.ConnectException";
-    expExcludeList[1] = "java.io.IOException";
-    
-    cluster = MRCluster.createCluster(new Configuration());
-    cluster.setExcludeExpList(expExcludeList);
-    cluster.setUp();
-  }
-
-  @AfterClass
-  public static void after() throws Exception {
-    cluster.tearDown();
-  }
-  
-  /**
-   * This test exercises the pushConfig feature. The pushConfig functionality is
-   * available as part of the cluster process manager. It takes a local input
-   * directory and pushes all of its files to the remote conf directory. This
-   * functionality is required to change the configuration on the fly and
-   * restart the cluster, and is used by other test cases.
-   * @throws Exception is thrown if pushConfig fails. 
-   */
-  @Test
-  public void testPushConfig() throws Exception {
-    final String DUMMY_CONFIG_STRING = "mapreduce.newdummy.conf";
-    final String DUMMY_CONFIG_STRING_VALUE = "HerriotTestRules";
-    Configuration origconf = new Configuration(cluster.getConf());
-    origconf.set(DUMMY_CONFIG_STRING, DUMMY_CONFIG_STRING_VALUE);
-    String localDir = HadoopDaemonRemoteCluster.getDeployedHadoopConfDir() + 
-        File.separator + localConfDir;
-    File lFile = new File(localDir);
-    if(!lFile.exists()){
-      lFile.mkdir();
-    }
-    String mapredConf = localDir + File.separator + "mapred-site.xml";
-    File file = new File(mapredConf);
-    origconf.writeXml(new FileOutputStream(file));    
-    Configuration daemonConf =  cluster.getJTClient().getProxy().getDaemonConf();
-    Assert.assertTrue("Dummy variable is expected to be null before restart.",
-        daemonConf.get(DUMMY_CONFIG_STRING) == null);
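-    // pushConfig copies the local conf directory to every node and returns
-    // the remote directory path that the daemons are then restarted with.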
-    String newDir = cluster.getClusterManager().pushConfig(localDir);
-    cluster.stop();
-    AbstractDaemonClient cli = cluster.getJTClient();
-    waitForClusterStop(cli);
-    // make sure the cluster has actually stopped
-    cluster.getClusterManager().start(newDir);
-    cli = cluster.getJTClient();
-    waitForClusterStart(cli);
-    // make sure the cluster has actually started
-    Configuration newconf = cluster.getJTClient().getProxy().getDaemonConf();
-    Assert.assertTrue("Extra variable is expected to be set",
-        newconf.get(DUMMY_CONFIG_STRING).equals(DUMMY_CONFIG_STRING_VALUE));
-    cluster.getClusterManager().stop(newDir);
-    cli = cluster.getJTClient();
-    // make sure the cluster has actually stopped
-    waitForClusterStop(cli);
-    // start the daemons with original conf dir
-    cluster.getClusterManager().start();
-    cli = cluster.getJTClient();    
-    waitForClusterStart(cli);  
-    daemonConf =  cluster.getJTClient().getProxy().getDaemonConf();
-    Assert.assertTrue("Dummy variable is expected to be null after restart.",
-        daemonConf.get(DUMMY_CONFIG_STRING) == null);
-    lFile.delete();
-  }
-  
-  private void waitForClusterStop(AbstractDaemonClient cli) throws Exception {
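-    // ping() succeeds while a daemon is still up; keep polling until it
-    // throws, which indicates that the daemon has actually stopped.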
-    int i=1;
-    while (i < 40) {
-      try {
-        cli.ping();
-        Thread.sleep(1000);
-        i++;
-      } catch (Exception e) {
-        break;
-      }
-    }
-    for (AbstractDaemonClient tcli : cluster.getTTClients()) {
-      i = 1;
-      while (i < 40) {
-        try {
-          tcli.ping();
-          Thread.sleep(1000);
-          i++;
-        } catch (Exception e) {
-          break;
-        }
-      }
-      if (i >= 40) {
-        Assert.fail("TT on " + tcli.getHostName() + " should have been down.");
-      }
-    }
-  }
-  
-  private void waitForClusterStart(AbstractDaemonClient cli) throws Exception {
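-    // ping() throws while a daemon is still down; keep retrying until it
-    // succeeds, which indicates that the daemon has come back up.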
-    int i=1;
-    while (i < 40) {
-      try {
-        cli.ping();
-        break;
-      } catch (Exception e) {
-        i++;
-        Thread.sleep(1000);
-        LOG.info("Waiting for Jobtracker on host : "
-            + cli.getHostName() + " to come up.");
-      }
-    }
-    for (AbstractDaemonClient tcli : cluster.getTTClients()) {
-      i = 1;
-      while (i < 40) {
-        try {
-          tcli.ping();
-          break;
-        } catch (Exception e) {
-          i++;
-          Thread.sleep(1000);
-          LOG.info("Waiting for Tasktracker on host : "
-              + tcli.getHostName() + " to come up.");
-        }
-      }
-    }
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestSortValidate.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestSortValidate.java
deleted file mode 100644
index aa0e1c2..0000000
--- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestSortValidate.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import junit.framework.Assert;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.examples.RandomWriter;
-import org.apache.hadoop.examples.Sort;
-
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * A System test to test the Map-Reduce framework's sort 
- * with a real Map-Reduce Cluster.
- */
-public class TestSortValidate {
-  // Input/Output paths for sort
-  private static final Path SORT_INPUT_PATH = new Path("inputDirectory");
-  private static final Path SORT_OUTPUT_PATH = new Path("outputDirectory");
-
-  // make it big enough to cause a spill in the map
-  private static final int RW_BYTES_PER_MAP = 3 * 1024 * 1024;
-  private static final int RW_MAPS_PER_HOST = 2;
-
-  private MRCluster cluster = null;
-  private FileSystem dfs = null;
-  private JobClient client = null;
-
-  private static final Log LOG = LogFactory.getLog(TestSortValidate.class);
-
-  public TestSortValidate()
-  throws Exception {
-    cluster = MRCluster.createCluster(new Configuration());
-  }
-
-  @Before
-  public void setUp() throws java.lang.Exception {
-    cluster.setUp();
-    client = cluster.getJTClient().getClient();
-
-    dfs = client.getFs();
-    dfs.delete(SORT_INPUT_PATH, true);
-    dfs.delete(SORT_OUTPUT_PATH, true);
-  }
-
-  @After
-  public void after() throws Exception {
-    cluster.tearDown();
-    dfs.delete(SORT_INPUT_PATH, true);
-    dfs.delete(SORT_OUTPUT_PATH, true);
-  }
-
-  public void runRandomWriter(Configuration job, Path sortInput) 
-  throws Exception {
-    // Scale down the default settings for RandomWriter for the test-case
-    // Generates NUM_HADOOP_SLAVES * RW_MAPS_PER_HOST * RW_BYTES_PER_MAP
-    job.setInt("test.randomwrite.bytes_per_map", RW_BYTES_PER_MAP);
-    job.setInt("test.randomwriter.maps_per_host", RW_MAPS_PER_HOST);
-    String[] rwArgs = {sortInput.toString()};
- 
-    runAndVerify(job,new RandomWriter(), rwArgs);
-  }
-
-  private void runAndVerify(Configuration job, Tool tool, String[] args)
-    throws Exception {
-
-    // This calculates the previous number of jobs submitted before a new
-    // job gets submitted.
-    int prevJobsNum = 0;
-
-    // JTProtocol wovenClient
-    JTProtocol wovenClient = cluster.getJTClient().getProxy();
-
-    // JobStatus
-    JobStatus[] jobStatus = null;
-
-    // JobID
-    JobID id = null;
-
-    // RunningJob rJob;
-    RunningJob rJob = null;
-
-    // JobInfo jInfo;
-    JobInfo jInfo = null;
-
-    //Getting the previous job numbers that are submitted.
-    jobStatus = client.getAllJobs();
-    prevJobsNum = jobStatus.length;
-
-    // Run RandomWriter
-    Assert.assertEquals(0, ToolRunner.run(job, tool, args));
-
-    //Waiting for the job to appear in the jobstatus
-    jobStatus = client.getAllJobs();
-
-    while (jobStatus.length - prevJobsNum == 0) {
-      LOG.info("Waiting for the job to appear in the jobStatus");
-      Thread.sleep(1000);
-      jobStatus = client.getAllJobs();
-    }
-
-    //Getting the jobId of the just submitted job
-    //The just submitted job is always added in the first slot of jobstatus
-    id = jobStatus[0].getJobID();
-
-    rJob = client.getJob(id);
-
-    jInfo = wovenClient.getJobInfo(id);
-
-    //Making sure that the job is complete.
-    while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
-      Thread.sleep(10000);
-      jInfo = wovenClient.getJobInfo(id);
-    }
-
-    cluster.getJTClient().verifyCompletedJob(id);
-  }
-  
-  private void runSort(Configuration job, Path sortInput, Path sortOutput) 
-  throws Exception {
-
-    job.setInt("io.sort.mb", 1);
-
-    // Setup command-line arguments to 'sort'
-    String[] sortArgs = {sortInput.toString(), sortOutput.toString()};
-    
-    runAndVerify(job,new Sort(), sortArgs);
-
-  }
-  
-  private void runSortValidator(Configuration job, 
-                                       Path sortInput, Path sortOutput) 
-  throws Exception {
-    String[] svArgs = {"-sortInput", sortInput.toString(), 
-                       "-sortOutput", sortOutput.toString()};
-
-    runAndVerify(job,new SortValidator(), svArgs);
-
-  }
- 
-  @Test 
-  public void testMapReduceSort() throws Exception {
-    // Run randomwriter to generate input for 'sort'
-    runRandomWriter(cluster.getConf(), SORT_INPUT_PATH);
-
-    // Run sort
-    runSort(cluster.getConf(), SORT_INPUT_PATH, SORT_OUTPUT_PATH);
-
-    // Run sort-validator to check if sort worked correctly
-    runSortValidator(cluster.getConf(), SORT_INPUT_PATH, 
-                     SORT_OUTPUT_PATH);
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskKilling.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskKilling.java
deleted file mode 100644
index d84f41a..0000000
--- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskKilling.java
+++ /dev/null
@@ -1,640 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JobInfo;
-import org.apache.hadoop.mapreduce.test.system.TaskInfo;
-import org.apache.hadoop.mapreduce.test.system.TTClient;
-import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
-import org.apache.hadoop.mapred.JobClient.NetworkedJob;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.SleepJob;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-/**
- * A system test for verifying the status after killing tasks under different
- * conditions.
- */
-public class TestTaskKilling {
-  private static final Log LOG = LogFactory.getLog(TestTaskKilling.class);
-  private static MRCluster cluster;
-  private static JobClient jobClient = null;
-  private static JTProtocol remoteJTClient = null;
-
-  public TestTaskKilling() {
-  }
-
-  @BeforeClass
-  public static void before() throws Exception {
-    Configuration conf = new Configuration();
-    cluster = MRCluster.createCluster(conf);
-    cluster.setUp();
-    jobClient = cluster.getJTClient().getClient();
-    remoteJTClient = cluster.getJTClient().getProxy();
-  }
-
-  @AfterClass
-  public static void after() throws Exception {
-    cluster.tearDown();
-  }
-
-  /**
-   * Verifies whether the running job succeeds or not after some of its
-   * tasks fail.
-   * 
-   * @throws ClassNotFoundException
-   */
-  @Test
-  public void testFailedTaskJobStatus()
-      throws IOException, InterruptedException, ClassNotFoundException {
-    Configuration conf = new Configuration(cluster.getConf());
-    TaskInfo taskInfo = null;
-    SleepJob job = new SleepJob();
-    job.setConf(conf);
-    Job slpJob = job.createJob(3, 1, 4000, 4000, 100, 100);
-    JobConf jobConf = new JobConf(conf);
-    jobConf.setMaxMapAttempts(20);
-    jobConf.setMaxReduceAttempts(20);
-    slpJob.submit();
-    RunningJob runJob =
-        jobClient.getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob
-            .getJobID()));
-    JobID id = runJob.getID();
-    JobInfo jInfo = remoteJTClient.getJobInfo(id);
-    int counter = 0;
-    while (counter < 60) {
-      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
-        break;
-      } else {
-        UtilsForTests.waitFor(1000);
-        jInfo = remoteJTClient.getJobInfo(id);
-      }
-      counter++;
-    }
-    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);
-
-    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
-    for (TaskInfo taskinfo : taskInfos) {
-      if (!taskinfo.isSetupOrCleanup()) {
-        taskInfo = taskinfo;
-      }
-    }
-
-    counter = 0;
-    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
-    while (counter < 60) {
-      if (taskInfo.getTaskStatus().length > 0) {
-        if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) {
-          break;
-        }
-      }
-      UtilsForTests.waitFor(1000);
-      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
-      counter++;
-    }
-    Assert.assertTrue("Task has not been started for 1 min.", counter != 60);
-
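-    // Kill one attempt of the chosen task through the client-side job handle;
-    // shouldFail is false, so the attempt is counted as killed, not failed.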
-    NetworkedJob networkJob = new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster);
-    TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
-    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
-    networkJob.killTask(taskAttID, false);
-
-    LOG.info("Waiting till the job is completed...");
-    while (!jInfo.getStatus().isJobComplete()) {
-      UtilsForTests.waitFor(100);
-      jInfo = remoteJTClient.getJobInfo(id);
-    }
-
-    Assert.assertEquals(
-        "JobStatus", jInfo.getStatus().getRunState(), JobStatus.SUCCEEDED);
-  }
-
-  /**
-   * Verifying whether task temporary output directory is cleaned up or not
-   * after killing the task.
-   */
-  @Test
-  public void testDirCleanupAfterTaskKilled()
-      throws IOException, InterruptedException {
-    TaskInfo taskInfo = null;
-    boolean isTempFolderExists = false;
-    String localTaskDir = null;
-    TTClient ttClient = null;
-    TaskID tID = null;
-    FileStatus filesStatus[] = null;
-    Path inputDir = new Path("input");
-    Path outputDir = new Path("output");
-    Configuration conf = new Configuration(cluster.getConf());
-    JobConf jconf = new JobConf(conf);
-    jconf.setJobName("Word Count");
-    jconf.setJarByClass(WordCount.class);
-    jconf.setMapperClass(WordCount.MapClass.class);
-    jconf.setCombinerClass(WordCount.Reduce.class);
-    jconf.setReducerClass(WordCount.Reduce.class);
-    jconf.setNumMapTasks(1);
-    jconf.setNumReduceTasks(1);
-    jconf.setMaxMapAttempts(20);
-    jconf.setMaxReduceAttempts(20);
-    jconf.setOutputKeyClass(Text.class);
-    jconf.setOutputValueClass(IntWritable.class);
-
-    cleanup(inputDir, conf);
-    cleanup(outputDir, conf);
-    createInput(inputDir, conf);
-    FileInputFormat.setInputPaths(jconf, inputDir);
-    FileOutputFormat.setOutputPath(jconf, outputDir);
-    RunningJob runJob = jobClient.submitJob(jconf);
-    JobID id = runJob.getID();
-    JobInfo jInfo = remoteJTClient.getJobInfo(id);
-    int counter = 0;
-    while (counter < 60) {
-      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
-        break;
-      } else {
-        UtilsForTests.waitFor(1000);
-        jInfo = remoteJTClient.getJobInfo(id);
-      }
-      counter++;
-    }
-    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);
-
-    JobStatus[] jobStatus = jobClient.getAllJobs();
-    String userName = jobStatus[0].getUsername();
-    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
-    for (TaskInfo taskinfo : taskInfos) {
-      if (!taskinfo.isSetupOrCleanup()) {
-        taskInfo = taskinfo;
-        break;
-      }
-    }
-
-    counter = 0;
-    while (counter < 30) {
-      if (taskInfo.getTaskStatus().length > 0) {
-        if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) {
-          break;
-        }
-      }
-      UtilsForTests.waitFor(1000);
-      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
-      counter++;
-    }
-    Assert.assertTrue("Task has not been started for 30 sec.", counter != 30);
-
-    tID = TaskID.downgrade(taskInfo.getTaskID());
-    FinishTaskControlAction action = new FinishTaskControlAction(tID);
-
-    String[] taskTrackers = taskInfo.getTaskTrackers();
-    counter = 0;
-    while (counter < 30) {
-      if (taskTrackers.length != 0) {
-        break;
-      }
-      UtilsForTests.waitFor(100);
-      taskTrackers = taskInfo.getTaskTrackers();
-      counter++;
-    }
-
-    String hostName = taskTrackers[0].split("_")[1];
-    hostName = hostName.split(":")[0];
-    ttClient = cluster.getTTClient(hostName);
-    ttClient.getProxy().sendAction(action);
-    String localDirs[] = ttClient.getMapredLocalDirs();
-    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
-    for (String localDir : localDirs) {
-      localTaskDir =
-          localDir
-              + "/"
-              + TaskTracker.getLocalTaskDir(userName, id.toString(), taskAttID
-                  .toString());
-      filesStatus = ttClient.listStatus(localTaskDir, true);
-      if (filesStatus.length > 0) {
-        isTempFolderExists = true;
-        NetworkedJob networkJob = new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster);
-        networkJob.killTask(taskAttID, false);
-        break;
-      }
-    }
-
-    Assert.assertTrue(
-        "Task Attempt directory "
-            + taskAttID + " has not been found while task was running.",
-        isTempFolderExists);
-    taskInfo = remoteJTClient.getTaskInfo(tID);
-
-    counter = 0;
-    while (counter < 60) {
-      UtilsForTests.waitFor(1000);
-      taskInfo = remoteJTClient.getTaskInfo(tID);
-      filesStatus = ttClient.listStatus(localTaskDir, true);
-      if (filesStatus.length == 0) {
-        break;
-      }
-      counter++;
-    }
-
-    Assert.assertTrue(
-        "Task attempt temporary folder has not been cleaned.",
-        isTempFolderExists && filesStatus.length == 0);
-    counter = 0;
-    while (counter < 30) {
-      UtilsForTests.waitFor(1000);
-      taskInfo = remoteJTClient.getTaskInfo(tID);
-      counter++;
-    }
-    taskInfo = remoteJTClient.getTaskInfo(tID);
-    Assert.assertEquals(
-        "Task status has not been changed to KILLED.", TaskStatus.State.KILLED,
-        taskInfo.getTaskStatus()[0].getRunState());
-  }
-
-  private void cleanup(Path dir, Configuration conf) throws IOException {
-    FileSystem fs = dir.getFileSystem(conf);
-    fs.delete(dir, true);
-  }
-
-  private void createInput(Path inDir, Configuration conf) throws IOException {
-    String input =
-        "Hadoop is a framework for data intensive distributed "
-            + "applications.\n"
-            + "Hadoop enables applications to work with thousands of nodes.";
-    FileSystem fs = inDir.getFileSystem(conf);
-    if (!fs.mkdirs(inDir)) {
-      throw new IOException("Failed to create the input directory:"
-          + inDir.toString());
-    }
-    fs.setPermission(inDir, new FsPermission(
-        FsAction.ALL, FsAction.ALL, FsAction.ALL));
-    DataOutputStream file = fs.create(new Path(inDir, "data.txt"));
-    int i = 0;
-    while (i < 1000 * 3000) {
-      file.writeBytes(input);
-      i++;
-    }
-    file.close();
-  }
-
-  /**
-   * Verifying whether task temporary output directory is cleaned up or not
-   * after failing the task.
-   */
-  @Test
-  public void testDirCleanupAfterTaskFailed()
-      throws IOException, InterruptedException {
-    TTClient ttClient = null;
-    FileStatus filesStatus[] = null;
-    String localTaskDir = null;
-    TaskInfo taskInfo = null;
-    TaskID tID = null;
-    boolean isTempFolderExists = false;
-    Path inputDir = new Path("input");
-    Path outputDir = new Path("output");
-    Configuration conf = new Configuration(cluster.getConf());
-    JobConf jconf = new JobConf(conf);
-    jconf.setJobName("Task Failed job");
-    jconf.setJarByClass(UtilsForTests.class);
-    jconf.setMapperClass(FailedMapperClass.class);
-    jconf.setNumMapTasks(1);
-    jconf.setNumReduceTasks(0);
-    jconf.setMaxMapAttempts(1);
-    cleanup(inputDir, conf);
-    cleanup(outputDir, conf);
-    createInput(inputDir, conf);
-    FileInputFormat.setInputPaths(jconf, inputDir);
-    FileOutputFormat.setOutputPath(jconf, outputDir);
-    RunningJob runJob = jobClient.submitJob(jconf);
-    JobID id = runJob.getID();
-    JobInfo jInfo = remoteJTClient.getJobInfo(id);
-
-    int counter = 0;
-    while (counter < 60) {
-      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
-        break;
-      } else {
-        UtilsForTests.waitFor(1000);
-        jInfo = remoteJTClient.getJobInfo(id);
-      }
-      counter++;
-    }
-    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);
-
-    JobStatus[] jobStatus = jobClient.getAllJobs();
-    String userName = jobStatus[0].getUsername();
-    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
-    for (TaskInfo taskinfo : taskInfos) {
-      if (!taskinfo.isSetupOrCleanup()) {
-        taskInfo = taskinfo;
-        break;
-      }
-    }
-
-    tID = TaskID.downgrade(taskInfo.getTaskID());
-    FinishTaskControlAction action = new FinishTaskControlAction(tID);
-    String[] taskTrackers = taskInfo.getTaskTrackers();
-    counter = 0;
-    while (counter < 30) {
-      if (taskTrackers.length != 0) {
-        break;
-      }
-      UtilsForTests.waitFor(1000);
-      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
-      taskTrackers = taskInfo.getTaskTrackers();
-      counter++;
-    }
-    Assert.assertTrue("Task tracker not found.", taskTrackers.length != 0);
-    String hostName = taskTrackers[0].split("_")[1];
-    hostName = hostName.split(":")[0];
-    ttClient = cluster.getTTClient(hostName);
-    ttClient.getProxy().sendAction(action);
-
-    counter = 0;
-    while (counter < 60) {
-      if (taskInfo.getTaskStatus().length > 0) {
-        if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) {
-          break;
-        }
-      }
-      UtilsForTests.waitFor(1000);
-      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
-      counter++;
-    }
-    Assert.assertTrue("Task has not been started for 1 min.", counter != 60);
-
-    String localDirs[] = ttClient.getMapredLocalDirs();
-    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
-    for (String localDir : localDirs) {
-      localTaskDir =
-          localDir
-              + "/"
-              + TaskTracker.getLocalTaskDir(userName, id.toString(), taskAttID
-                  .toString());
-      filesStatus = ttClient.listStatus(localTaskDir, true);
-      if (filesStatus.length > 0) {
-        isTempFolderExists = true;
-        break;
-      }
-    }
-
-    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
-    Assert.assertTrue(
-        "Task Attempt directory "
-            + taskAttID + " has not been found while task was running.",
-        isTempFolderExists);
-    counter = 0;
-    while (counter < 30) {
-      UtilsForTests.waitFor(1000);
-      taskInfo = remoteJTClient.getTaskInfo(tID);
-      counter++;
-    }
-
-    Assert.assertEquals("Task status has not been changed to FAILED.", taskInfo
-        .getTaskStatus()[0].getRunState(), TaskStatus.State.FAILED);
-
-    filesStatus = ttClient.listStatus(localTaskDir, true);
-    Assert.assertTrue(
-        "Temporary folder has not been cleaned up.", filesStatus.length == 0);
-  }
-
-  public static class FailedMapperClass
-      implements Mapper<NullWritable, NullWritable, NullWritable, NullWritable> {
-    public void configure(JobConf job) {
-    }
-
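-    // The map waits for roughly 240 seconds and then throws an IOException,
-    // so the attempt fails and the task is eventually marked FAILED.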
-    public void map(
-        NullWritable key, NullWritable value,
-        OutputCollector<NullWritable, NullWritable> output, Reporter reporter)
-        throws IOException {
-      int counter = 0;
-      while (counter < 240) {
-        UtilsForTests.waitFor(1000);
-        counter++;
-      }
-      if (counter == 240) {
-        throw new IOException();
-      }
-    }
-
-    public void close() {
-    }
-  }
-
-  /**
-   * This test verifies job killing by killing all the task attempts
-   * of a particular task.
-   */
-  @Test
-  public void testAllTaskAttemptKill() throws Exception {
-    Configuration conf = new Configuration(cluster.getConf());
-
-    JobStatus[] jobStatus = null;
-
-    SleepJob job = new SleepJob();
-    job.setConf(conf);
-    Job slpJob = job.createJob(3, 1, 40000, 1000, 100, 100);
-    JobConf jconf = new JobConf(conf);
-
-    // Submitting the job
-    slpJob.submit();
-    RunningJob rJob =
-        cluster.getJTClient().getClient().getJob(
-            org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID()));
-
-    int MAX_MAP_TASK_ATTEMPTS =
-        Integer.parseInt(jconf.get(MRJobConfig.MAP_MAX_ATTEMPTS));
-
-    LOG.info("MAX_MAP_TASK_ATTEMPTS is : " + MAX_MAP_TASK_ATTEMPTS);
-
-    Assert.assertTrue(MAX_MAP_TASK_ATTEMPTS > 0);
-
-    TTClient tClient = null;
-    TTClient[] ttClients = null;
-
-    JobInfo jInfo = remoteJTClient.getJobInfo(rJob.getID());
-
-    // Assert if jobInfo is null
-    Assert.assertNotNull(jInfo.getStatus().getRunState());
-
-    // Wait for the job to start running.
-    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
-      try {
-        Thread.sleep(10000);
-      } catch (InterruptedException e) {
-      }
-      ;
-      jInfo = remoteJTClient.getJobInfo(rJob.getID());
-    }
-
-    // Temporarily store the jobid to use it later for comparison.
-    JobID jobidStore = rJob.getID();
-    jobidStore = JobID.downgrade(jobidStore);
-    LOG.info("job id is :" + jobidStore.toString());
-
-    TaskInfo[] taskInfos = null;
-
-    // After making sure that the job is running,
-    // the test execution has to make sure that
-    // at least one task has started running before continuing.
-    boolean runningCount = false;
-    int count = 0;
-    do {
-      taskInfos = cluster.getJTClient().getProxy().getTaskInfo(rJob.getID());
-      runningCount = false;
-      for (TaskInfo taskInfo : taskInfos) {
-        TaskStatus[] taskStatuses = taskInfo.getTaskStatus();
-        if (taskStatuses.length > 0) {
-          LOG.info("taskStatuses[0].getRunState() is :"
-              + taskStatuses[0].getRunState());
-          if (taskStatuses[0].getRunState() == TaskStatus.State.RUNNING) {
-            runningCount = true;
-            break;
-          } else {
-            LOG.info("Sleeping 5 seconds");
-            Thread.sleep(5000);
-          }
-        }
-      }
-      count++;
-      // If the count goes beyond a point, then break. This is to avoid an
-      // infinite loop under unforeseen circumstances; the test case will
-      // fail later anyway.
-      if (count > 10) {
-        Assert.fail("Since the sleep count has reached beyond a point, "
-            + "failing at this point");
-      }
-    } while (!runningCount);
-
-    // This whole module is about getting the task Attempt id
-    // of one task and killing it MAX_MAP_TASK_ATTEMPTS times,
-    // whenever it re-attempts to run.
-    String taskIdKilled = null;
-    for (int i = 0; i < MAX_MAP_TASK_ATTEMPTS; i++) {
-      taskInfos = cluster.getJTClient().getProxy().getTaskInfo(rJob.getID());
-
-      for (TaskInfo taskInfo : taskInfos) {
-        TaskAttemptID taskAttemptID;
-        if (!taskInfo.isSetupOrCleanup()) {
-          // This is the task which is going to be killed continuously in
-          // all its task attempts. The first such task is picked up.
-          TaskID taskid = TaskID.downgrade(taskInfo.getTaskID());
-          LOG.info("taskid is :" + taskid);
-          if (i == 0) {
-            taskIdKilled = taskid.toString();
-            taskAttemptID = new TaskAttemptID(taskid, i);
-            LOG.info("taskAttemptid going to be killed is : " + taskAttemptID);
-            (new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster)).killTask(
-                taskAttemptID, true);
-            checkTaskCompletionEvent(taskAttemptID, jInfo);
-            break;
-          } else {
-            if (taskIdKilled.equals(taskid.toString())) {
-              taskAttemptID = new TaskAttemptID(taskid, i);
-              LOG
-                  .info("taskAttemptid going to be killed is : "
-                      + taskAttemptID);
-              (new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster)).killTask(
-                  taskAttemptID, true);
-              checkTaskCompletionEvent(taskAttemptID, jInfo);
-              break;
-            }
-          }
-        }
-      }
-    }
-    // Making sure that the job is complete.
-    while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
-      Thread.sleep(10000);
-      jInfo = remoteJTClient.getJobInfo(rJob.getID());
-    }
-
-    // Making sure that the correct jobstatus is got from all the jobs
-    jobStatus = jobClient.getAllJobs();
-    JobStatus jobStatusFound = null;
-    for (JobStatus jobStatusTmp : jobStatus) {
-      if (JobID.downgrade(jobStatusTmp.getJobID()).equals(jobidStore)) {
-        jobStatusFound = jobStatusTmp;
-        LOG.info("jobStatus found is :" + jobStatusFound.getJobId().toString());
-      }
-    }
-
-    // Making sure that the job has FAILED
-    Assert.assertEquals(
-        "The job should have failed at this stage", JobStatus.FAILED,
-        jobStatusFound.getRunState());
-  }
-
-  // This method checks if the task attempt id occurs in the list
-  // of tasks that are completed (killed) for a job. This is
-  // required because after issuing a kill command, the task
-  // has to be killed and appear in the task completion events.
-  // After this a new task attempt will start running in a
-  // matter of a few seconds.
-  public void checkTaskCompletionEvent(
-      TaskAttemptID taskAttemptID, JobInfo jInfo) throws Exception {
-    boolean match = false;
-    int count = 0;
-    while (!match) {
-      TaskCompletionEvent[] taskCompletionEvents =
-          new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster)
-              .getTaskCompletionEvents(0);
-      for (TaskCompletionEvent taskCompletionEvent : taskCompletionEvents) {
-        if ((taskCompletionEvent.getTaskAttemptId().toString())
-            .equals(taskAttemptID.toString())) {
-          match = true;
-          // Sleeping for 10 seconds giving time for the next task
-          // attempt to run
-          Thread.sleep(10000);
-          break;
-        }
-      }
-      if (!match) {
-        LOG.info("Thread is sleeping for 10 seconds");
-        Thread.sleep(10000);
-        count++;
-      }
-      // If the count goes beyond a point, then break. This is to avoid an
-      // infinite loop under unforeseen circumstances; the test case will
-      // fail later anyway.
-      if (count > 10) {
-        Assert.fail("Since the task attemptid is not appearing in the "
-            + "TaskCompletionEvent, it seems this task attempt was not killed");
-      }
-    }
-  }
-}
diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskOwner.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskOwner.java
deleted file mode 100644
index 76d658d..0000000
--- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskOwner.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.util.StringTokenizer;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import testjar.UserNamePermission;
-
-public class TestTaskOwner {
-  private static final Log LOG = LogFactory.getLog(TestTaskOwner.class);
-  private static Path outDir = new Path("output");
-  private static Path inDir = new Path("input");
-  public static MRCluster cluster;
-
-  // The role of this job is to write the user name to the output file
-  // which will be parsed
-
-  @BeforeClass
-  public static void setUp() throws java.lang.Exception {
-
-    cluster = MRCluster.createCluster(new Configuration());
-    cluster.setUp();
-    FileSystem fs = inDir.getFileSystem(cluster.getJTClient().getConf());
-    // Make sure that all is clean in case last tearDown wasn't successful
-    fs.delete(outDir, true);
-    fs.delete(inDir, true);
-
-    fs.create(inDir, true);
-  }
-
-  @Test
-  public void testProcessPermission() throws Exception {
-  // The user will submit a plain old map reduce job;
-  // this job will output the username of the task that is running
-  // in the cluster, and we will check whether it matches
-  // the user who submitted the job.
-
-    Configuration conf = cluster.getJTClient().getConf();
-    Job job = new Job(conf, "user name check");
-
-    job.setJarByClass(UserNamePermission.class);
-    job.setMapperClass(UserNamePermission.UserNameMapper.class);
-    job.setCombinerClass(UserNamePermission.UserNameReducer.class);
-    job.setMapOutputKeyClass(Text.class);
-    job.setMapOutputValueClass(Text.class);
-
-    job.setReducerClass(UserNamePermission.UserNameReducer.class);
-    job.setNumReduceTasks(1);
-
-    FileInputFormat.addInputPath(job, inDir);
-    FileOutputFormat.setOutputPath(job, outDir);
-
-    job.waitForCompletion(true);
-
-    // now verify that the user name written by the task tracker is the same
-    // as the user name that was used to launch the task in the first place
-    FileSystem fs = outDir.getFileSystem(conf);
-
-    Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
-     new Utils.OutputFileUtils.OutputFilesFilter()));
-
-    for (int i = 0; i < fileList.length; ++i) {
-	  LOG.info("File list[" + i + "]" + ": " + fileList[i]);
-	  BufferedReader file = new BufferedReader(new InputStreamReader(fs
-      .open(fileList[i])));
-       String line = file.readLine();
-       while (line != null) {
-         StringTokenizer token = new StringTokenizer(line);
-         if (token.hasMoreTokens()) {
-           LOG.info("First token " + token.nextToken());
-           String userName = token.nextToken();
-
-           LOG.info("Next token " + userName);
-           Assert.assertEquals(
-               "The user name did not match; permission violation",
-               userName, System.getProperty("user.name").toString());
-           break;
-         }
-        }
-        file.close();
-     }
-  }
-
-  @AfterClass
-  public static void tearDown() throws java.lang.Exception {
-    FileSystem fs = outDir.getFileSystem(cluster.getJTClient().getConf());
-    fs.delete(outDir, true);
-    fs.delete(inDir, true);
-    cluster.tearDown();
-   }
-}
-
-
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 1c70425..6d24b2d 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
@@ -340,7 +343,7 @@
                     <!-- Using Unix script to preserve symlinks -->
                     <echo file="${project.build.directory}/dist-copynativelibs.sh">
 
-                      which cygpath 2> /dev/null
+                      which cygpath 2&gt; /dev/null
                       if [ $? = 1 ]; then
                         BUILD_DIR="${project.build.directory}"
                       else
@@ -377,7 +380,7 @@
                     <!-- Using Unix script to preserve symlinks -->
                     <echo file="${project.build.directory}/dist-maketar.sh">
 
-                      which cygpath 2> /dev/null
+                      which cygpath 2&gt; /dev/null
                       if [ $? = 1 ]; then
                         BUILD_DIR="${project.build.directory}"
                       else
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 06e33cf..1aa3e40 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
@@ -212,6 +215,11 @@
         <artifactId>hadoop-mapreduce-examples</artifactId>
         <version>${project.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-gridmix</artifactId>
+        <version>${project.version}</version>
+      </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
@@ -230,6 +238,11 @@
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-datajoin</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-rumen</artifactId>
         <version>${project.version}</version>
       </dependency>
diff --git a/hadoop-tools/hadoop-archives/pom.xml b/hadoop-tools/hadoop-archives/pom.xml
index 1560cb0..f472c6f 100644
--- a/hadoop-tools/hadoop-archives/pom.xml
+++ b/hadoop-tools/hadoop-archives/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-tools/hadoop-datajoin/pom.xml b/hadoop-tools/hadoop-datajoin/pom.xml
new file mode 100644
index 0000000..984682f
--- /dev/null
+++ b/hadoop-tools/hadoop-datajoin/pom.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-datajoin</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <description>Apache Hadoop Data Join</description>
+  <name>Apache Hadoop Data Join</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <hadoop.log.dir>${project.build.directory}/log</hadoop.log.dir>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-hs</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-core</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-tests</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>create-log-dir</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <delete dir="${test.build.data}"/>
+                <mkdir dir="${test.build.data}"/>
+                <mkdir dir="${hadoop.log.dir}"/>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+      </plugin>
+    </plugins>
+  </build>
+</project>
+
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java b/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java
rename to hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java b/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java
rename to hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java b/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java
rename to hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java b/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java
rename to hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java b/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/JobBase.java
similarity index 96%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java
rename to hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/JobBase.java
index dd34a4b..9ef21b30 100644
--- a/hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java
+++ b/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/JobBase.java
@@ -51,7 +51,7 @@
    *          the value for the counter
    */
   protected void setLongValue(Object name, long value) {
-    this.longCounters.put(name, new Long(value));
+    this.longCounters.put(name, Long.valueOf(value));
   }
 
   /**
@@ -100,9 +100,9 @@
     Long val = this.longCounters.get(name);
     Long retv = null;
     if (val == null) {
-      retv = new Long(inc);
+      retv = Long.valueOf(inc);
     } else {
-      retv = new Long(val.longValue() + inc);
+      retv = Long.valueOf(val.longValue() + inc);
     }
     this.longCounters.put(name, retv);
     return retv;
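Side note (not part of the patch): the switch from new Long(...) to Long.valueOf(...) avoids allocating a fresh wrapper on every counter update, since valueOf may return a cached instance for small values. A minimal, hypothetical sketch of the difference:

    // Hypothetical sketch: Long.valueOf can reuse cached boxes (at least -128..127),
    // while new Long(...) always allocates a new object.
    public class BoxingSketch {
      public static void main(String[] args) {
        System.out.println(Long.valueOf(42L) == Long.valueOf(42L)); // true: same cached instance
        System.out.println(new Long(42L) == new Long(42L));         // false: two distinct objects
      }
    }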
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java b/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java
rename to hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java b/hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java
rename to hadoop-tools/hadoop-datajoin/src/main/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/README.txt b/hadoop-tools/hadoop-datajoin/src/test/java/README.txt
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/README.txt
rename to hadoop-tools/hadoop-datajoin/src/test/java/README.txt
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinMapper.java b/hadoop-tools/hadoop-datajoin/src/test/java/SampleDataJoinMapper.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinMapper.java
rename to hadoop-tools/hadoop-datajoin/src/test/java/SampleDataJoinMapper.java
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinReducer.java b/hadoop-tools/hadoop-datajoin/src/test/java/SampleDataJoinReducer.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinReducer.java
rename to hadoop-tools/hadoop-datajoin/src/test/java/SampleDataJoinReducer.java
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleTaggedMapOutput.java b/hadoop-tools/hadoop-datajoin/src/test/java/SampleTaggedMapOutput.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleTaggedMapOutput.java
rename to hadoop-tools/hadoop-datajoin/src/test/java/SampleTaggedMapOutput.java
diff --git a/hadoop-mapreduce-project/src/contrib/data_join/src/test/org/apache/hadoop/contrib/utils/join/TestDataJoin.java b/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
similarity index 98%
rename from hadoop-mapreduce-project/src/contrib/data_join/src/test/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
rename to hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
index 1eab959..37df5b0 100644
--- a/hadoop-mapreduce-project/src/contrib/data_join/src/test/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
+++ b/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
@@ -57,6 +57,7 @@
   public void testDataJoin() throws Exception {
     final int srcs = 4;
     JobConf job = new JobConf();
+    job.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", false);
     Path base = cluster.getFileSystem().makeQualified(new Path("/inner"));
     Path[] src = writeSimpleSrc(base, job, srcs);
     job.setInputFormat(SequenceFileInputFormat.class);
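Side note (not part of the patch): the added setting disables the zero-length _SUCCESS marker that FileOutputCommitter otherwise writes into the job output directory, so the test's raw listing of output files sees only the part-* outputs. A hedged sketch, with a hypothetical helper name, of the same idea:

    // Hypothetical sketch: turn off the _SUCCESS marker on a JobConf, then list the
    // output directory, which should then contain only part-* files.
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.JobConf;

    public class SuccessMarkerSketch {
      static void configureAndList(JobConf job, Path outDir) throws Exception {
        job.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", false);
        FileSystem fs = outDir.getFileSystem(job);
        for (FileStatus stat : fs.listStatus(outDir)) {
          System.out.println(stat.getPath());   // expected: part-* outputs only
        }
      }
    }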
diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml
index 46e0c1a..aaf2d59 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-tools/hadoop-extras/pom.xml b/hadoop-tools/hadoop-extras/pom.xml
index da8bd15..1087b53 100644
--- a/hadoop-tools/hadoop-extras/pom.xml
+++ b/hadoop-tools/hadoop-extras/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-tools/hadoop-gridmix/pom.xml b/hadoop-tools/hadoop-gridmix/pom.xml
new file mode 100644
index 0000000..9537883
--- /dev/null
+++ b/hadoop-tools/hadoop-gridmix/pom.xml
@@ -0,0 +1,131 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-gridmix</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <description>Apache Hadoop Gridmix</description>
+  <name>Apache Hadoop Gridmix</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <hadoop.log.dir>${project.build.directory}/log</hadoop.log.dir>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-hs</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-core</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-rumen</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-tests</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>create-log-dir</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <delete dir="${test.build.data}"/>
+                <mkdir dir="${test.build.data}"/>
+                <mkdir dir="${hadoop.log.dir}"/>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <configuration>
+          <archive>
+            <manifest>
+              <mainClass>org.apache.hadoop.mapred.gridmix.Gridmix</mainClass>
+            </manifest>
+          </archive>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/AvgRecordFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/AvgRecordFactory.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/AvgRecordFactory.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/AvgRecordFactory.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
similarity index 98%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
index 341767c..9568171 100644
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
+++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
@@ -23,7 +23,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.mapred.JobTracker;
 import org.apache.hadoop.mapred.gridmix.Statistics.ClusterStats;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 
@@ -114,4 +113,4 @@
   protected String getNamenodeInfo() {
     return namenodeInfo;
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FilePool.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/FilePool.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FilePool.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/FilePool.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FileQueue.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/FileQueue.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FileQueue.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/FileQueue.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateData.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateData.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateData.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateData.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
similarity index 99%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
index eea90cf..4bdc001 100644
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
+++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
@@ -697,10 +697,10 @@
     }
   }
 
-  private <T> String getEnumValues(Enum<? extends T>[] e) {
+  private String getEnumValues(Enum<?>[] e) {
     StringBuilder sb = new StringBuilder();
     String sep = "";
-    for (Enum<? extends T> v : e) {
+    for (Enum<?> v : e) {
       sb.append(sep);
       sb.append(v.name());
       sep = "|";
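Side note (not part of the patch): the unused type parameter is dropped because a plain Enum<?>[] is all the method needs in order to join constant names with '|'. A self-contained, hypothetical sketch of the simplified form:

    // Hypothetical sketch: joining enum constant names with '|' via Enum<?>[].
    public class EnumValuesSketch {
      enum Policy { REPLAY, STRESS, SERIAL }   // example enum, not taken from the patch

      static String getEnumValues(Enum<?>[] e) {
        StringBuilder sb = new StringBuilder();
        String sep = "";
        for (Enum<?> v : e) {
          sb.append(sep).append(v.name());
          sep = "|";
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println(getEnumValues(Policy.values()));  // prints REPLAY|STRESS|SERIAL
      }
    }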
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJobSubmissionPolicy.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixJobSubmissionPolicy.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJobSubmissionPolicy.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixJobSubmissionPolicy.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixKey.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixKey.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixKey.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixKey.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixSplit.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixSplit.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixSplit.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixSplit.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/InputStriper.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/InputStriper.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/InputStriper.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/InputStriper.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/IntermediateRecordFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/IntermediateRecordFactory.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/IntermediateRecordFactory.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/IntermediateRecordFactory.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobCreator.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobCreator.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobCreator.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobCreator.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadJob.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadJob.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadSplit.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadSplit.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/LoadSplit.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadSplit.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Progressive.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Progressive.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Progressive.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Progressive.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomAlgorithms.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomAlgorithms.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomAlgorithms.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomAlgorithms.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RecordFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RecordFactory.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RecordFactory.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RecordFactory.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java
similarity index 96%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java
index db643de..c06b0a2 100644
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java
+++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java
@@ -68,15 +68,18 @@
     try {
       in = new LineReader(fs.open(userloc));
       while (in.readLine(rawUgi) > 0) {//line is of the form username[,group]*
+        if(rawUgi.toString().trim().equals("")) {
+          continue; //Continue on empty line
+        }
         // e is end position of user name in this line
         int e = rawUgi.find(",");
-        if (rawUgi.getLength() == 0 || e == 0) {
+        if (e == 0) {
           throw new IOException("Missing username: " + rawUgi);
         }
         if (e == -1) {
           e = rawUgi.getLength();
         }
-        final String username = Text.decode(rawUgi.getBytes(), 0, e);
+        final String username = Text.decode(rawUgi.getBytes(), 0, e).trim();
         UserGroupInformation ugi = null;
         try {
           ugi = UserGroupInformation.createProxyUser(username,
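Side note (not part of the patch): the change makes the users-file reader tolerate blank lines and surrounding whitespace; only a line that starts with a comma is still rejected as having no username. A hedged, stand-alone sketch of that parsing rule:

    // Hypothetical sketch of the rule enforced above for lines of the form username[,group]*:
    // skip blank lines, trim the username, and reject a line that begins with a comma.
    import java.io.IOException;

    public class UserLineSketch {
      static String parseUser(String line) throws IOException {
        String raw = line.trim();
        if (raw.isEmpty()) {
          return null;                              // skip empty lines
        }
        int e = raw.indexOf(',');
        if (e == 0) {
          throw new IOException("Missing username: " + raw);
        }
        return (e == -1 ? raw : raw.substring(0, e)).trim();
      }
    }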
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SleepJob.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SleepJob.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SleepJob.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SleepJob.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StatListener.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StatListener.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StatListener.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StatListener.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Statistics.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Statistics.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java
similarity index 99%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java
index df17ebd..4dcc1a2 100644
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java
+++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java
@@ -298,8 +298,6 @@
   /**
    * We try to use some light-weight mechanism to determine cluster load.
    *
-   * @param stats
-   * @param clusterStatus Cluster status
    * @throws java.io.IOException
    */
   protected void checkLoadAndGetSlotsToBackfill() 
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Summarizer.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Summarizer.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Summarizer.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Summarizer.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/UserResolver.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/UserResolver.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/UserResolver.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/UserResolver.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/CumulativeCpuUsageEmulatorPlugin.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageMatcher.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageMatcher.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageMatcher.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageMatcher.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java
rename to hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobProducer.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DebugJobProducer.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobProducer.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DebugJobProducer.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java
similarity index 91%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java
index 8b727d2..49f1709 100644
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java
+++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java
@@ -1,5 +1,7 @@
 package org.apache.hadoop.mapred.gridmix;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -32,6 +34,7 @@
  * limitations under the License.
  */
 public class GridmixTestUtils {
+  private static final Log LOG = LogFactory.getLog(GridmixTestUtils.class);
   static final Path DEST = new Path("/gridmix");
   static FileSystem dfs = null;
   static MiniDFSCluster dfsCluster = null;
@@ -69,15 +72,13 @@
       if(fs.exists(homeDirectory)) {
         fs.delete(homeDirectory,true);
       }
-      TestGridmixSubmission.LOG.info(
-        "Creating Home directory : " + homeDirectory);
+      LOG.info("Creating Home directory : " + homeDirectory);
       fs.mkdirs(homeDirectory);
       changePermission(user,homeDirectory, fs);
       Path stagingArea = 
         new Path(conf.get("mapreduce.jobtracker.staging.root.dir",
                           "/tmp/hadoop/mapred/staging"));
-      TestGridmixSubmission.LOG.info(
-        "Creating Staging root directory : " + stagingArea);
+      LOG.info("Creating Staging root directory : " + stagingArea);
       fs.mkdirs(stagingArea);
       fs.setPermission(stagingArea, new FsPermission((short) 0777));
     } catch (IOException ioe) {
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationUtils.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationUtils.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationUtils.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationUtils.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFilePool.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFilePool.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFilePool.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFilePool.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFileQueue.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFileQueue.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFileQueue.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFileQueue.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixMemoryEmulation.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixRecord.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixRecord.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixRecord.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixRecord.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestHighRamJob.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomTextDataGenerator.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomTextDataGenerator.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRandomTextDataGenerator.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomTextDataGenerator.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRecordFactory.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRecordFactory.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRecordFactory.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRecordFactory.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestUserResolve.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestUserResolve.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestUserResolve.java
rename to hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestUserResolve.java
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/data/wordcount.json.gz b/hadoop-tools/hadoop-gridmix/src/test/resources/data/wordcount.json.gz
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/gridmix/src/test/data/wordcount.json.gz
rename to hadoop-tools/hadoop-gridmix/src/test/resources/data/wordcount.json.gz
Binary files differ
diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml
index 8f5fddd..abeaf53 100644
--- a/hadoop-tools/hadoop-rumen/pom.xml
+++ b/hadoop-tools/hadoop-rumen/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml
index 0ab9f6c..86e4e3d 100644
--- a/hadoop-tools/hadoop-streaming/pom.xml
+++ b/hadoop-tools/hadoop-streaming/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml
index 46d1c19..330cfd3 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
@@ -54,9 +57,19 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-datajoin</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-extras</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-gridmix</artifactId>
+      <scope>compile</scope>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml
index dfa9049..3166b9e 100644
--- a/hadoop-tools/pom.xml
+++ b/hadoop-tools/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
@@ -32,6 +35,8 @@
     <module>hadoop-distcp</module>
     <module>hadoop-archives</module>
     <module>hadoop-rumen</module>
+    <module>hadoop-gridmix</module>
+    <module>hadoop-datajoin</module>
     <module>hadoop-tools-dist</module>
     <module>hadoop-extras</module>
   </modules>
diff --git a/pom.xml b/pom.xml
index efdf3f9..67ef585 100644
--- a/pom.xml
+++ b/pom.xml
@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-main</artifactId>